From 86fc13ec765947b5496d77527c8713ba0704320c Mon Sep 17 00:00:00 2001 From: Will Tomlin <76996781+witomlin@users.noreply.github.com> Date: Thu, 12 Dec 2024 10:10:03 +0000 Subject: [PATCH] Support for Kube 1.32 (#14) --- .github/workflows/tests.yaml | 6 +- CHANGELOG.md | 35 +++ Dockerfile | 2 +- README.md | 104 ++++--- .../container-startup-autoscaler/CHANGELOG.md | 13 + .../container-startup-autoscaler/Chart.yaml | 4 +- .../templates/clusterrole.yaml | 3 + cmd/container-startup-autoscaler/main.go | 5 +- go.mod | 98 +++---- go.sum | 266 ++++++++---------- internal/controller/controller.go | 148 +++++----- internal/controller/controller_test.go | 44 +-- .../controller/controllercommon/config.go | 2 +- internal/controller/predicatefunc.go | 14 +- internal/controller/predicatefunc_test.go | 16 +- internal/logging/log_test.go | 1 - .../metrics/informercache/informercache.go | 71 +++++ .../informercache/informercache_test.go | 88 ++++++ internal/metrics/metricscommon/const.go | 7 +- internal/metrics/reconciler/reconciler.go | 39 +-- .../metrics/reconciler/reconciler_test.go | 9 +- internal/metrics/registry.go | 17 +- internal/metrics/registry_test.go | 28 +- internal/metrics/retry/kubeapi.go | 15 +- internal/metrics/retry/kubeapi_test.go | 9 +- internal/metrics/scale/scale.go | 23 +- internal/metrics/scale/scale_test.go | 9 +- internal/pod/containerkubehelper.go | 27 -- internal/pod/containerkubehelper_test.go | 85 ------ internal/pod/error.go | 14 +- internal/pod/error_test.go | 9 - internal/pod/kubehelper.go | 104 +++++-- internal/pod/kubehelper_test.go | 230 ++++++++++----- internal/pod/podcommon/stateconst.go | 17 -- internal/pod/podcommon/states.go | 46 ++- internal/pod/podcommon/states_test.go | 3 - .../pod/podcommon/statusannotation_test.go | 8 +- .../pod/podtest/mockcontainerkubehelper.go | 14 - internal/pod/podtest/mockkubehelper.go | 13 +- internal/pod/podtest/mockstatus.go | 10 +- .../pod/podtest/mocktargetcontainerstate.go | 1 - internal/pod/podtest/pod.go | 
52 ++-- internal/pod/podtest/podbuilder.go | 10 - internal/pod/podtest/podinterceptor.go | 47 +++- internal/pod/status.go | 17 +- internal/pod/status_test.go | 10 +- internal/pod/targetcontaineraction.go | 61 +--- internal/pod/targetcontaineraction_test.go | 188 ++++--------- internal/pod/targetcontainerstate.go | 45 +-- internal/pod/targetcontainerstate_test.go | 142 ---------- scripts/sandbox/config/vars.sh | 2 +- scripts/sandbox/csa-install.sh | 53 +++- scripts/sandbox/extracacert/Dockerfile | 6 + test/integration/consts.go | 2 +- test/integration/csa.go | 2 +- test/integration/extracacert/Dockerfile | 6 + test/integration/integration_test.go | 23 +- test/integration/kind.go | 91 +++++- test/integration/suppliedconfig.go | 34 ++- 59 files changed, 1195 insertions(+), 1253 deletions(-) create mode 100644 internal/metrics/informercache/informercache.go create mode 100644 internal/metrics/informercache/informercache_test.go create mode 100644 scripts/sandbox/extracacert/Dockerfile create mode 100644 test/integration/extracacert/Dockerfile diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 7894e89..b646380 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: '1.22.9' + go-version: '1.23.3' - name: Test run: make test-run-unit @@ -29,7 +29,7 @@ jobs: strategy: matrix: arg: - - '1.31' + - '1.32' steps: - name: Checkout repository @@ -40,7 +40,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: '1.22.9' + go-version: '1.23.3' - name: Set up Kind uses: helm/kind-action@v1.10.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 9612904..69d45cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,41 @@ - Based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## 0.5.0 +2024-12-12 + +### Added +- Support for Kube 1.32. + - Container resizes now performed through `resize` subresource. +- Ability to register an additional CA certificate (or chain) when building the kind node image for integration tests + and sandbox scripts. + +### Changed +- Upgrades Go to 1.23.3. +- Upgrades all dependencies. +- Renames controller-runtime controller name to shorten. + +### Removed +- Examination of `AllocatedResources` within container status. + - Not required and now behind feature gate in Kube 1.32. +- Controller name label from CSA metrics. + +### Fixed +- Inconsistent status updates through informer cache race. +- CSA metrics not being published. + +### Helm Chart +[1.4.0](charts/container-startup-autoscaler/CHANGELOG.md#140) + +| Kube Version | Compatible? | `In-place Update of Pod Resources` Maturity | +|:------------:|:-----------:|:-------------------------------------------:| +| 1.32 | ✔️ | Alpha | +| 1.31 | ❌ | Alpha | +| 1.30 | ❌ | Alpha | +| 1.29 | ❌ | Alpha | +| 1.28 | ❌ | Alpha | +| 1.27 | ❌ | Alpha | + ## 0.4.0 2024-11-29 diff --git a/Dockerfile b/Dockerfile index 426d6d9..3cf5a36 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM --platform=$BUILDPLATFORM golang:1.22.9 AS build +FROM --platform=$BUILDPLATFORM golang:1.23.3 AS build ARG BUILDPLATFORM ARG TARGETPLATFORM ARG TARGETOS diff --git a/README.md b/README.md index e581073..d3341fa 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ works with deployments, statefulsets, daemonsets and other workload management A CSA is implemented using [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime). 
CSA is built around Kube's [In-place Update of Pod Resources](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources) -feature, which is currently in alpha state as of Kubernetes 1.31 and therefore requires the `InPlacePodVerticalScaling` +feature, which is currently in alpha state as of Kubernetes 1.32 and therefore requires the `InPlacePodVerticalScaling` feature gate to be enabled. Beta/stable targets are indicated [here](https://github.com/kubernetes/enhancements/issues/1287). The feature implementation (along with the corresponding implementation of CSA) is likely to change until it reaches stable status. See [CHANGELOG.md](CHANGELOG.md) for details of CSA versions and Kubernetes version compatibility. @@ -44,8 +44,10 @@ non-production Kubernetes clusters. * [Reconciler](#reconciler) * [Scale](#scale) * [Kube API Retry](#kube-api-retry) + * [Informer Cache](#informer-cache) * [Retry](#retry) * [Kube API](#kube-api) + * [Informer Cache Sync](#informer-cache-sync) * [Encountering Unknown Resources](#encountering-unknown-resources) * [CSA Configuration](#csa-configuration) * [Controller](#controller) @@ -153,11 +155,11 @@ scaled pod. ## Limitations The following limitations are currently in place: -- Originally admitted target container resources must be guaranteed (`requests` == `limits`) to match the guaranteed - nature of startup resources - Kube API currently rejects any change in resource QoS. This should be addressed as the - `In-place Update of Pod Resources` feature matures. -- Post-startup resources must be guaranteed (`requests` == `limits`) to match the guaranteed nature of startup - resources per above. 
+- Originally admitted target container resources must be guaranteed (`requests` == `limits`) because CSA only allows + guaranteed resources for its startup settings and the `In-place Update of Pod Resources` feature + [does not currently allow](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources#qos-class) + changing QoS class. +- Post-startup resources must also currently be guaranteed (`requests` == `limits`) to avoid a changing QoS class. - Failed target container scales are not re-attempted. ## Restrictions @@ -243,7 +245,6 @@ Example output: "started": "true", "ready": "false", "resources": "poststartup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmatch" }, "scale": { @@ -267,8 +268,7 @@ Explanation of status items: | `states` | `started` | Whether the container is signalled as started by Kube. | | `states` | `ready` | Whether the container is signalled as ready by Kube. | | `states` | `resources` | The type of resources (startup/post-startup) that are currently applied (but not necessarily enacted). | -| `states` | `allocatedResources` | How the reported container allocated resources relate to container requests. | -| `states` | `statusResources` | How the reported currently allocated resources relate to container resources. | +| `states` | `statusResources` | How the reported current enacted resources relate to container resources. | | `scale` | - | Information around scaling activity. | | `scale` | `lastCommanded` | The last time a scale was commanded (UTC). | | `scale` | `lastEnacted` | The last time a scale was enacted (UTC; empty if failed). 
| @@ -306,7 +306,7 @@ Example `info`-level log: ```json { "level": "info", - "controller": "container-startup-autoscaler", + "controller": "csa", "namespace": "echoserver", "name": "echoserver-5f65d8f65d-mvqt8", "reconcileID": "6157dd49-7aa9-4cac-bbaf-a739fa48cc61", @@ -318,7 +318,6 @@ Example `info`-level log: "started": "true", "ready": "false", "resources": "poststartup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmatch" }, "caller": "container-startup-autoscaler/internal/pod/targetcontaineraction.go:472", @@ -341,30 +340,26 @@ values. ### Reconciler Prefixed with `csa_reconciler_`: -| Metric Name | Type | Labels | Description | -|--------------------------------|---------|--------------|----------------------------------------------------------------------------------------------------------------------| -| `skipped_only_status_change` | Counter | `controller` | Number of reconciles that were skipped because only the scaler controller status changed. | -| `existing_in_progress` | Counter | `controller` | Number of attempted reconciles where one was already in progress for the same namespace/name (results in a requeue). | -| `failure_unable_to_get_pod` | Counter | `controller` | Number of reconciles where there was a failure to get the pod (results in a requeue). | -| `failure_pod_doesnt_exist` | Counter | `controller` | Number of reconciles where the pod was found not to exist (results in failure). | -| `failure_validation` | Counter | `controller` | Number of reconciles where there was a failure to validate (results in failure). | -| `failure_states_determination` | Counter | `controller` | Number of reconciles where there was a failure to determine states (results in failure). | -| `failure_states_action` | Counter | `controller` | Number of reconciles where there was a failure to action the determined states (results in failure). | - -Labels: -- `controller`: the CSA controller name. 
+| Metric Name | Type | Labels | Description | +|--------------------------------|---------|--------|----------------------------------------------------------------------------------------------------------------------| +| `skipped_only_status_change` | Counter | None | Number of reconciles that were skipped because only the scaler controller status changed. | +| `existing_in_progress` | Counter | None | Number of attempted reconciles where one was already in progress for the same namespace/name (results in a requeue). | +| `failure_unable_to_get_pod` | Counter | None | Number of reconciles where there was a failure to get the pod (results in a requeue). | +| `failure_pod_doesnt_exist` | Counter | None | Number of reconciles where the pod was found not to exist (results in failure). | +| `failure_validation` | Counter | None | Number of reconciles where there was a failure to validate (results in failure). | +| `failure_states_determination` | Counter | None | Number of reconciles where there was a failure to determine states (results in failure). | +| `failure_states_action` | Counter | None | Number of reconciles where there was a failure to action the determined states (results in failure). | ### Scale Prefixed with `csa_scale_`: -| Metric Name | Type | Labels | Description | -|-------------------------------|-----------|--------------------------------------|---------------------------------------------------------------------------------------------------------------| -| `failure` | Counter | `controller`, `direction`, `reason` | Number of scale failures. | -| `commanded_unknown_resources` | Counter | `controller` | Number of scales commanded upon encountering unknown resources (see [here](#encountering-unknown-resources)). | -| `duration_seconds` | Histogram | `controller`, `direction`, `outcome` | Scale duration (from commanded to enacted). 
| +| Metric Name | Type | Labels | Description | +|-------------------------------|-----------|------------------------|---------------------------------------------------------------------------------------------------------------| +| `failure` | Counter | `direction`, `reason` | Number of scale failures. | +| `commanded_unknown_resources` | Counter | None | Number of scales commanded upon encountering unknown resources (see [here](#encountering-unknown-resources)). | +| `duration_seconds` | Histogram | `direction`, `outcome` | Scale duration (from commanded to enacted). | Labels: -- `controller`: the CSA controller name. - `direction`: the direction of the scale - `up`/`down`. - `reason`: the reason why the scale failed. - `outcome`: the outcome of the scale - `success`/`failure`. @@ -372,16 +367,25 @@ Labels: ### Kube API Retry Prefixed with `csa_retrykubeapi_`: -| Metric Name | Type | Labels | Description | -|-------------|---------|------------------------|-----------------------------| -| `retry` | Counter | `controller`, `reason` | Number of Kube API retries. | +| Metric Name | Type | Labels | Description | +|-------------|---------|----------|-----------------------------| +| `retry` | Counter | `reason` | Number of Kube API retries. | Labels: -- `controller`: the CSA controller name. - `reason`: the Kube API response that caused a retry to occur. See [below](#retry) for more information on retries. +### Informer Cache +Prefixed with `csa_informercache_`: + +| Metric Name | Type | Labels | Description | +|----------------|-----------|--------|---------------------------------------------------------------------------------------------------------------------------------------------| +| `sync_poll` | Histogram | None | Number of informer cache sync polls after a pod mutation was performed via the Kube API. 
| +| `sync_timeout` | Counter | None | Number of informer cache sync timeouts after a pod mutation was performed via the Kube API (may result in inconsistent CSA status updates). | + +See [below](#informer-cache-sync) for more information on informer cache syncs. + ## Retry ### Kube API Unless Kube API reports that a pod is not found upon trying to retrieve it, all Kube API interactions are subject to @@ -390,6 +394,26 @@ retry according to CSA retry [configuration](#csa-configuration). CSA handles situations where Kube API reports a conflict upon a pod update. In this case, CSA retrieves the latest version of the pod and reapplies the update, before trying again (subject to retry configuration). +## Informer Cache Sync +The CSA [status](#status) includes timestamps that CSA uses itself internally, such as for calculating scale durations. +When status is updated, CSA waits for the updated pod to be reflected in the informer cache prior to finishing +the reconcile to ensure following reconciles have the latest status available to work upon. Without this mechanism, the +rapid pace of pod updates during resizes can prevent subsequent reconciles from retrieving the latest status. This +occurs because the informer may not have cached the updated pod in time, resulting in inaccurate status updates. + +The CSA reconciler doesn't allow concurrent reconciles for the same pod so subsequent reconciles will not start until the +wait described above has completed. + +The informer cache metrics described [above](#informer-cache) provide insight into how quickly the informer cache is +updated (synced) after status is updated, and whether any timeouts occur: + +- `sync_poll`: the number of cache polls that were required to confirm the cache was populated with the updated + pod. The cache is polled periodically per the `waitForCacheUpdatePollMillis` configuration [here](internal/pod/kubehelper.go). + Higher values indicate longer cache sync times. 
+- `sync_timeout`: the number of times the cache sync timed out per the `waitForCacheUpdateTimeoutMillis` + configuration [here](internal/pod/kubehelper.go). Timeouts do not result in an error or termination of the reconcile, + but may result in inconsistent CSA status updates. + ## Encountering Unknown Resources By default, CSA will yield an error if it encounters resources applied to a target container that it doesn't recognize i.e. resources other than those specified within the pod startup or post-startup resource [annotations](#annotations). This may @@ -418,7 +442,7 @@ All configuration flags are always logged upon CSA start. | `--leader-election-resource-namespace` | String | - | The namespace to create resources in if leader election is enabled (uses current namespace if not supplied). | | `--cache-sync-period-mins` | Integer | `60` | How frequently the informer should re-sync. | | `--graceful-shutdown-timeout-secs` | Integer | `10` | How long to allow busy workers to complete upon shutdown. | -| `--requeue-duration-secs` | Integer | `3` | How long to wait before requeuing a reconcile. | +| `--requeue-duration-secs` | Integer | `1` | How long to wait before requeuing a reconcile. | | `--max-concurrent-reconciles` | Integer | `10` | The maximum number of concurrent reconciles. | | `--scale-when-unknown-resources` | Boolean | `false` | Whether to scale when [unknown resources](#encountering-unknown-resources) are encountered. | @@ -514,12 +538,13 @@ Integration tests are implemented as Go tests and located in `test/integration`. The integration tests use [echo-server](https://github.com/Ealenn/Echo-Server) for containers. Note: the very first execution might take some time to complete. 
-A number of environment variable-based configuration options are available: +A number of environment variable-based configuration items are available: | Name | Default | Description | |--------------------------|---------|--------------------------------------------------------------------------------------------------------------------------------------| | `KUBE_VERSION` | - | The _major.minor_ version of Kube to run tests against e.g. `1.31`. | | `MAX_PARALLELISM` | `4` | The maximum number of tests that can run in parallel. | +| `EXTRA_CA_CERT_PATH` | - | See below. | | `REUSE_CLUSTER` | `false` | Whether to reuse an existing CSA kind cluster (if it already exists). `KUBE_VERSION` has no effect if an existing cluster is reused. | | `INSTALL_METRICS_SERVER` | `false` | Whether to install metrics-server. | | `KEEP_CSA` | `false` | Whether to keep the CSA installation after tests finish. | @@ -531,6 +556,10 @@ namespace (but using the same single CSA installation). If local resources are l accordingly and ensure `DELETE_NS_AFTER_TEST` is `true`. Each test typically spins up 2 pods, each with 2 containers; see source for resource allocations. +`EXTRA_CA_CERT_PATH` is an optional configuration item that allows registration of an additional CA certificate +(or chain) when building the kind node image. This will be required if a technology that intercepts encrypted network +traffic via insertion of its own CA is being used. The path must be absolute and reference a PEM-formatted file. + ## Running Locally A number of Bash scripts are supplied in the `scripts/sandbox` directory that allow you to try out CSA using [echo-server](https://github.com/Ealenn/Echo-Server). The scripts are similar in nature to the setup/teardown work @@ -551,7 +580,10 @@ Executing `csa-install.sh`: - [Leader election](#controller) is enabled; 2 pods are created. - [Log verbosity level](#log) is `2` (trace). -Note: the very first execution might take some time to complete. 
+Note: +- To register an additional CA certificate (or chain) when building the kind node image as described + [above](#integration), pass `--extra-ca-cert-path=/path/to/ca.pem` when executing the script. +- The very first execution might take some time to complete. ### Tailing CSA Logs Executing `csa-tail-logs.sh` tails logs from the current CSA leader pod. diff --git a/charts/container-startup-autoscaler/CHANGELOG.md b/charts/container-startup-autoscaler/CHANGELOG.md index cf030e1..00d4e45 100644 --- a/charts/container-startup-autoscaler/CHANGELOG.md +++ b/charts/container-startup-autoscaler/CHANGELOG.md @@ -2,6 +2,19 @@ - Based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.4.0 +2024-12-12 + +### Added +- Support for Kube 1.32. + - Cluster role includes `pods/resize` rule. + +### Changed +- CSA version. + +### CSA Version +[0.5.0](../../CHANGELOG.md#050) + ## 1.3.0 2024-11-29 diff --git a/charts/container-startup-autoscaler/Chart.yaml b/charts/container-startup-autoscaler/Chart.yaml index 6f0d24e..c9aaf77 100644 --- a/charts/container-startup-autoscaler/Chart.yaml +++ b/charts/container-startup-autoscaler/Chart.yaml @@ -3,8 +3,8 @@ name: container-startup-autoscaler description: > container-startup-autoscaler is a Kubernetes controller that modifies the CPU and/or memory resources of containers depending on whether they're starting up. 
-version: 1.3.0 -appVersion: "0.4.0" +version: 1.4.0 +appVersion: "0.5.0" home: https://github.com/ExpediaGroup/container-startup-autoscaler/README.md sources: - https://github.com/ExpediaGroup/container-startup-autoscaler diff --git a/charts/container-startup-autoscaler/templates/clusterrole.yaml b/charts/container-startup-autoscaler/templates/clusterrole.yaml index 830d786..a06d57b 100644 --- a/charts/container-startup-autoscaler/templates/clusterrole.yaml +++ b/charts/container-startup-autoscaler/templates/clusterrole.yaml @@ -9,6 +9,9 @@ rules: - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "patch", "update", "watch"] + - apiGroups: [""] + resources: ["pods/resize"] + verbs: ["patch"] - apiGroups: [""] resources: ["events"] verbs: ["create", "patch", "update"] diff --git a/cmd/container-startup-autoscaler/main.go b/cmd/container-startup-autoscaler/main.go index e5359bb..83663af 100644 --- a/cmd/container-startup-autoscaler/main.go +++ b/cmd/container-startup-autoscaler/main.go @@ -105,10 +105,7 @@ func run(_ *cobra.Command, _ []string) { logging.Fatalf(nil, err, "unable to add healthz check") } - csaController, err := controller.NewController(controllerConfig, runtimeManager) - if err != nil { - logging.Fatalf(nil, err, "unable to create controller") - } + csaController := controller.NewController(controllerConfig, runtimeManager) if err = csaController.Initialize(); err != nil { logging.Fatalf(nil, err, "unable to initialize controller") diff --git a/go.mod b/go.mod index 215a5a2..a981f13 100644 --- a/go.mod +++ b/go.mod @@ -1,78 +1,80 @@ module github.com/ExpediaGroup/container-startup-autoscaler -go 1.22.9 +go 1.23.3 require ( - github.com/avast/retry-go/v4 v4.5.0 - github.com/go-logr/logr v1.3.0 + github.com/avast/retry-go/v4 v4.6.0 + github.com/go-logr/logr v1.4.2 github.com/go-logr/zerologr v1.2.3 - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.6.0 github.com/orcaman/concurrent-map/v2 v2.0.1 - github.com/prometheus/client_golang 
v1.17.0 - github.com/rs/zerolog v1.31.0 - github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.8.4 - github.com/tonglil/buflogr v1.0.1 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 - k8s.io/component-base v0.28.3 - sigs.k8s.io/controller-runtime v0.16.3 + github.com/prometheus/client_golang v1.20.5 + github.com/rs/zerolog v1.33.0 + github.com/spf13/cobra v1.8.1 + github.com/stretchr/testify v1.10.0 + github.com/tonglil/buflogr v1.1.1 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + k8s.io/component-base v0.32.0 + sigs.k8s.io/controller-runtime v0.19.2 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.7.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.7.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic-models v0.6.8 // 
indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.1 // indirect - golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.13.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + 
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/term v0.26.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.35.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20241127205056-99599406b04f // indirect + k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index bbed53a..521b672 100644 --- a/go.sum +++ b/go.sum @@ -1,65 +1,58 @@ -github.com/avast/retry-go/v4 v4.5.0 h1:QoRAZZ90cj5oni2Lsgl2GW8mNTnUCnmpx/iKpwVisHg= -github.com/avast/retry-go/v4 v4.5.0/go.mod h1:7hLEXp0oku2Nir2xBAsg0PTphp9z71bN5Aq1fboC3+I= +github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= +github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= 
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= -github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
-github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 
h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/imdario/mergo 
v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -68,13 +61,14 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -83,8 +77,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -92,155 +84,135 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v1.27.10 
h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= -github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tonglil/buflogr v1.0.1 h1:WXFZLKxLfqcVSmckwiMCF8jJwjIgmStJmg63YKRF1p0= -github.com/tonglil/buflogr v1.0.1/go.mod h1:yYWwvSpn/3uAaqjf6mJg/XMiAciaR0QcRJH2gJGDxNE= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tonglil/buflogr v1.1.1 h1:CKAjOHBSMmgbRFxpn/RhQHPj5oANc7ekhlsoUDvcZIg= +github.com/tonglil/buflogr v1.1.1/go.mod h1:WLLtPRLqcFYWQLbA+ytXy5WrFTYnfA+beg1MpvJCxm4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 
v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d 
h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= 
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod 
h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= -k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= -k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= -k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.3 
h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= +k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241127205056-99599406b04f h1:nLHvOvs1CZ+FAEwR4EqLeRLfbtWQNlIu5g393Hq/1UM= +k8s.io/kube-openapi v0.0.0-20241127205056-99599406b04f/go.mod h1:iZjdMQzunI7O/sUrf/5WRX1gvaAIam32lKx9+paoLbU= +k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno= +k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.2 
h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= +sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3 h1:sCP7Vv3xx/CWIuTPVN38lUPx0uw0lcLfzaiDa8Ja01A= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/controller/controller.go b/internal/controller/controller.go index 8125cac..c5fd199 100644 --- a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -17,7 +17,7 @@ limitations under the License. package controller import ( - "errors" + "sync" "github.com/ExpediaGroup/container-startup-autoscaler/internal/common" "github.com/ExpediaGroup/container-startup-autoscaler/internal/controller/controllercommon" @@ -35,93 +35,97 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -const Name = "container-startup-autoscaler" +const Name = "csa" +var onceInstance sync.Once var instance *controller // controller represents the CSA controller itself. 
type controller struct { - initialized bool - controllerConfig controllercommon.ControllerConfig runtimeManager manager.Manager + + onceInit sync.Once } func NewController( controllerConfig controllercommon.ControllerConfig, runtimeManager manager.Manager, -) (*controller, error) { - if instance != nil { - return &controller{}, errors.New("controller already previously created") - } - - return &controller{ - controllerConfig: controllerConfig, - runtimeManager: runtimeManager, - }, nil +) *controller { + onceInstance.Do(func() { + instance = &controller{ + controllerConfig: controllerConfig, + runtimeManager: runtimeManager, + } + }) + + return instance } // Initialize performs the tasks necessary to initialize the controller and register it with the controller-runtime -// manager. May only be invoked once. runtimeController parameter is provided for test injection. +// manager. Will only be invoked once. runtimeController parameter is provided for test injection. func (c *controller) Initialize(runtimeController ...runtimecontroller.Controller) error { - if c.initialized { - return errors.New("controller already initialized") - } - - reconciler := NewContainerStartupAutoscalerReconciler( - pod.NewPod(c.controllerConfig, c.runtimeManager.GetClient(), c.runtimeManager.GetEventRecorderFor(Name)), - c.controllerConfig, - ) - - var actualRuntimeController runtimecontroller.Controller - - if len(runtimeController) == 0 { - var err error - actualRuntimeController, err = runtimecontroller.New( - Name, - c.runtimeManager, - runtimecontroller.Options{ - MaxConcurrentReconciles: c.controllerConfig.MaxConcurrentReconciles, - Reconciler: reconciler, - LogConstructor: func(req *reconcile.Request) logr.Logger { - log := logging.Logger - log = log.WithValues("controller", Name) - - if req != nil { - log = log.WithValues( - "namespace", req.Namespace, - "name", req.Name, - ) - } - - return log - }, - }, + var retErr error + + c.onceInit.Do(func() { + reconciler := 
NewContainerStartupAutoscalerReconciler( + pod.NewPod(c.controllerConfig, c.runtimeManager.GetClient(), c.runtimeManager.GetEventRecorderFor(Name)), + c.controllerConfig, ) - if err != nil { - return common.WrapErrorf(err, "unable to create controller-runtime controller") + + var actualRuntimeController runtimecontroller.Controller + + if len(runtimeController) == 0 { + var err error + actualRuntimeController, err = runtimecontroller.New( + Name, + c.runtimeManager, + runtimecontroller.Options{ + MaxConcurrentReconciles: c.controllerConfig.MaxConcurrentReconciles, + Reconciler: reconciler, + LogConstructor: func(req *reconcile.Request) logr.Logger { + log := logging.Logger + log = log.WithValues("controller", Name) + + if req != nil { + log = log.WithValues( + "namespace", req.Namespace, + "name", req.Name, + ) + } + + return log + }, + }, + ) + if err != nil { + retErr = common.WrapErrorf(err, "unable to create controller-runtime controller") + return + } + } else { + actualRuntimeController = runtimeController[0] + } + + // Predicates are employed to filter out pod changes that are not necessary to reconcile. + if err := actualRuntimeController.Watch( + source.Kind( + c.runtimeManager.GetCache(), + &v1.Pod{}, + &handler.TypedEnqueueRequestForObject[*v1.Pod]{}, + predicate.TypedFuncs[*v1.Pod]{ + CreateFunc: PredicateCreateFunc, + DeleteFunc: PredicateDeleteFunc, + UpdateFunc: PredicateUpdateFunc, + GenericFunc: PredicateGenericFunc, + }, + ), + ); err != nil { + retErr = common.WrapErrorf(err, "unable to watch pods") + return } - } else { - actualRuntimeController = runtimeController[0] - } - - // Predicates are employed to filter out pod changes that are not necessary to reconcile. 
- if err := actualRuntimeController.Watch( - source.Kind(c.runtimeManager.GetCache(), &v1.Pod{}), - &handler.EnqueueRequestForObject{}, - predicate.Funcs{ - CreateFunc: PredicateCreateFunc, - DeleteFunc: PredicateDeleteFunc, - UpdateFunc: PredicateUpdateFunc, - GenericFunc: PredicateGenericFunc, - }, - ); err != nil { - return common.WrapErrorf(err, "unable to watch pods") - } - - csametrics.RegisterAllMetrics(metrics.Registry, Name) - defer csametrics.UnregisterAllMetrics(metrics.Registry) - c.initialized = true - - return nil + + csametrics.RegisterAllMetrics(metrics.Registry) + }) + + return retErr } diff --git a/internal/controller/controller_test.go b/internal/controller/controller_test.go index e0ec49d..db4eff2 100644 --- a/internal/controller/controller_test.go +++ b/internal/controller/controller_test.go @@ -33,10 +33,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/config" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -56,8 +54,8 @@ func (m *mockController) Reconcile(_ context.Context, _ reconcile.Request) (reco panic(errors.New("not supported")) } -func (m *mockController) Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error { - args := m.Called(src, eventhandler, predicates) +func (m *mockController) Watch(src source.TypedSource[reconcile.Request]) error { + args := m.Called(src) return args.Error(0) } @@ -142,7 +140,7 @@ func (m *mockRuntimeManager) Elected() <-chan struct{} { panic(errors.New("not supported")) } -func (m *mockRuntimeManager) AddMetricsExtraHandler(_ string, _ http.Handler) error { +func (m *mockRuntimeManager) 
AddMetricsServerExtraHandler(_ string, _ http.Handler) error { panic(errors.New("not supported")) } @@ -168,22 +166,16 @@ func (m *mockRuntimeManager) GetControllerOptions() config.Controller { // --------------------------------------------------------------------------------------------------------------------- func TestNewController(t *testing.T) { - t.Run("AlreadyPreviouslyCreated", func(t *testing.T) { - instance = &controller{} - cont, err := NewController(controllercommon.ControllerConfig{}, nil) - assert.Contains(t, err.Error(), "controller already previously created") - assert.Empty(t, cont) - instance = nil - }) - t.Run("Ok", func(t *testing.T) { - conf := controllercommon.ControllerConfig{} + conf := controllercommon.ControllerConfig{KubeConfig: "test1"} runtimeManager := newMockRuntimeManager(func(*mockRuntimeManager) {}) - cont, err := NewController(conf, runtimeManager) - assert.Nil(t, err) - assert.False(t, cont.initialized) + cont := NewController(conf, runtimeManager) assert.Equal(t, conf, cont.controllerConfig) assert.Equal(t, runtimeManager, cont.runtimeManager) + + conf = controllercommon.ControllerConfig{KubeConfig: "test2"} + cont = NewController(conf, runtimeManager) + assert.Equal(t, "test1", cont.controllerConfig.KubeConfig) }) } @@ -192,18 +184,8 @@ func TestControllerInitialize(t *testing.T) { name string configManagerMockFunc func(*mockRuntimeManager) configControllerMockFunc func(*mockController) - started bool - wantStarted bool wantErrMsg string }{ - { - name: "AlreadyInitialized", - configManagerMockFunc: func(*mockRuntimeManager) {}, - configControllerMockFunc: func(*mockController) {}, - started: true, - wantStarted: true, - wantErrMsg: "controller already initialized", - }, { name: "UnableToWatchPods", configManagerMockFunc: func(runtimeManager *mockRuntimeManager) { @@ -214,9 +196,7 @@ func TestControllerInitialize(t *testing.T) { configControllerMockFunc: func(controller *mockController) { controller.On("Watch", mock.Anything, 
mock.Anything, mock.Anything).Return(errors.New("")) }, - started: false, - wantStarted: false, - wantErrMsg: "unable to watch pods", + wantErrMsg: "unable to watch pods", }, { name: "Ok", @@ -229,14 +209,11 @@ func TestControllerInitialize(t *testing.T) { configControllerMockFunc: func(controller *mockController) { controller.On("Watch", mock.Anything, mock.Anything, mock.Anything).Return(nil) }, - started: false, - wantStarted: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &controller{ - initialized: tt.started, controllerConfig: controllercommon.ControllerConfig{}, runtimeManager: newMockRuntimeManager(tt.configManagerMockFunc), } @@ -247,7 +224,6 @@ func TestControllerInitialize(t *testing.T) { } else { assert.Nil(t, err) } - assert.Equal(t, tt.wantStarted, c.initialized) }) } } diff --git a/internal/controller/controllercommon/config.go b/internal/controller/controllercommon/config.go index 36b6f6b..e22e400 100644 --- a/internal/controller/controllercommon/config.go +++ b/internal/controller/controllercommon/config.go @@ -46,7 +46,7 @@ const ( FlagRequeueDurationSecsName = "requeue-duration-secs" FlagRequeueDurationSecsDesc = "how long to wait before requeuing a reconcile" - FlagRequeueDurationSecsDefault = 3 + FlagRequeueDurationSecsDefault = 1 FlagMaxConcurrentReconcilesName = "max-concurrent-reconciles" FlagMaxConcurrentReconcilesDesc = "the maximum number of concurrent reconciles" diff --git a/internal/controller/predicatefunc.go b/internal/controller/predicatefunc.go index 438dc8a..e5aecf0 100644 --- a/internal/controller/predicatefunc.go +++ b/internal/controller/predicatefunc.go @@ -20,7 +20,7 @@ import ( "github.com/ExpediaGroup/container-startup-autoscaler/internal/common" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/reconciler" "github.com/ExpediaGroup/container-startup-autoscaler/internal/pod/podcommon" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" 
"sigs.k8s.io/controller-runtime/pkg/event" ) @@ -53,21 +53,21 @@ import ( */ // PredicateCreateFunc reports whether create events should be reconciled. -func PredicateCreateFunc(_ event.CreateEvent) bool { +func PredicateCreateFunc(_ event.TypedCreateEvent[*v1.Pod]) bool { // Never filter. return true } // PredicateDeleteFunc reports whether delete events should be reconciled. -func PredicateDeleteFunc(_ event.DeleteEvent) bool { +func PredicateDeleteFunc(_ event.TypedDeleteEvent[*v1.Pod]) bool { // Don't need to reconcile deletes. return false } // PredicateUpdateFunc reports whether update events should be reconciled. -func PredicateUpdateFunc(event event.UpdateEvent) bool { - oldPod := event.ObjectOld.(*v1.Pod) - newPod := event.ObjectNew.(*v1.Pod) +func PredicateUpdateFunc(event event.TypedUpdateEvent[*v1.Pod]) bool { + oldPod := event.ObjectOld + newPod := event.ObjectNew if oldPod.ResourceVersion == newPod.ResourceVersion { // Shouldn't really find ourselves here... @@ -118,6 +118,6 @@ func PredicateUpdateFunc(event event.UpdateEvent) bool { } // PredicateGenericFunc reports whether generic events should be reconciled. 
-func PredicateGenericFunc(_ event.GenericEvent) bool { +func PredicateGenericFunc(_ event.TypedGenericEvent[*v1.Pod]) bool { return false } diff --git a/internal/controller/predicatefunc_test.go b/internal/controller/predicatefunc_test.go index a33259f..dea99eb 100644 --- a/internal/controller/predicatefunc_test.go +++ b/internal/controller/predicatefunc_test.go @@ -29,18 +29,18 @@ import ( ) func TestPredicateCreateFunc(t *testing.T) { - assert.True(t, PredicateCreateFunc(event.CreateEvent{})) + assert.True(t, PredicateCreateFunc(event.TypedCreateEvent[*v1.Pod]{})) } func TestPredicateDeleteFunc(t *testing.T) { - assert.False(t, PredicateDeleteFunc(event.DeleteEvent{})) + assert.False(t, PredicateDeleteFunc(event.TypedDeleteEvent[*v1.Pod]{})) } func TestPredicateUpdateFunc(t *testing.T) { t.Run("ResourceVersionSame", func(t *testing.T) { oldPod, newPod := &v1.Pod{}, &v1.Pod{} oldPod.ResourceVersion, newPod.ResourceVersion = "1", "1" - evt := event.UpdateEvent{ + evt := event.TypedUpdateEvent[*v1.Pod]{ ObjectOld: oldPod, ObjectNew: newPod, } @@ -52,7 +52,7 @@ func TestPredicateUpdateFunc(t *testing.T) { oldPod.ResourceVersion, newPod.ResourceVersion = "1", "2" now := metav1.Now() newPod.DeletionTimestamp = &now - evt := event.UpdateEvent{ + evt := event.TypedUpdateEvent[*v1.Pod]{ ObjectOld: oldPod, ObjectNew: newPod, } @@ -62,7 +62,7 @@ func TestPredicateUpdateFunc(t *testing.T) { t.Run("StatusMissingOldNew", func(t *testing.T) { oldPod, newPod := &v1.Pod{}, &v1.Pod{} oldPod.ResourceVersion, newPod.ResourceVersion = "1", "2" - evt := event.UpdateEvent{ + evt := event.TypedUpdateEvent[*v1.Pod]{ ObjectOld: oldPod, ObjectNew: newPod, } @@ -74,7 +74,7 @@ func TestPredicateUpdateFunc(t *testing.T) { oldPod.ResourceVersion, newPod.ResourceVersion = "1", "2" oldPod.Annotations = map[string]string{podcommon.AnnotationStatus: "test1"} newPod.Annotations = map[string]string{podcommon.AnnotationStatus: "test2"} - evt := event.UpdateEvent{ + evt := 
event.TypedUpdateEvent[*v1.Pod]{ ObjectOld: oldPod, ObjectNew: newPod, } @@ -89,7 +89,7 @@ func TestPredicateUpdateFunc(t *testing.T) { oldPod.Annotations = map[string]string{podcommon.AnnotationStatus: "test1"} newPod.Annotations = map[string]string{podcommon.AnnotationStatus: "test1"} oldPod.ObjectMeta.Name, oldPod.ObjectMeta.Name = "test1", "test2" - evt := event.UpdateEvent{ + evt := event.TypedUpdateEvent[*v1.Pod]{ ObjectOld: oldPod, ObjectNew: newPod, } @@ -98,5 +98,5 @@ func TestPredicateUpdateFunc(t *testing.T) { } func TestPredicateGenericFunc(t *testing.T) { - assert.False(t, PredicateGenericFunc(event.GenericEvent{})) + assert.False(t, PredicateGenericFunc(event.TypedGenericEvent[*v1.Pod]{})) } diff --git a/internal/logging/log_test.go b/internal/logging/log_test.go index 8ce3930..99f7c77 100644 --- a/internal/logging/log_test.go +++ b/internal/logging/log_test.go @@ -287,7 +287,6 @@ func testContextPodInfo() context.Context { podcommon.StateBoolFalse, podcommon.StateBoolFalse, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, )) return ctx diff --git a/internal/metrics/informercache/informercache.go b/internal/metrics/informercache/informercache.go new file mode 100644 index 0000000..b8aa4b7 --- /dev/null +++ b/internal/metrics/informercache/informercache.go @@ -0,0 +1,71 @@ +/* +Copyright 2024 Expedia Group, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package informercache + +import ( + "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/metricscommon" + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +const ( + Subsystem = "informercache" +) + +const ( + syncPollName = "sync_poll" + syncTimeoutName = "sync_timeout" +) + +var ( + syncPoll = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: metricscommon.Namespace, + Subsystem: Subsystem, + Name: syncPollName, + Help: "Number of informer cache sync polls after a pod mutation was performed via the Kube API", + Buckets: []float64{1, 2, 4, 8, 16, 32, 64}, + }, []string{}) + + syncTimeout = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: metricscommon.Namespace, + Subsystem: Subsystem, + Name: syncTimeoutName, + Help: "Number of informer cache sync timeouts after a pod mutation was performed via the Kube API (may result in inconsistent CSA status updates)", + }, []string{}) +) + +// allMetrics must include all metrics defined above. +var allMetrics = []prometheus.Collector{ + syncPoll, + syncTimeout, +} + +func RegisterMetrics(registry metrics.RegistererGatherer) { + registry.MustRegister(allMetrics...) +} + +func ResetMetrics() { + metricscommon.ResetMetrics(allMetrics) +} + +func SyncPoll() prometheus.Observer { + return syncPoll.WithLabelValues() +} + +func SyncTimeout() prometheus.Counter { + return syncTimeout.WithLabelValues() +} diff --git a/internal/metrics/informercache/informercache_test.go b/internal/metrics/informercache/informercache_test.go new file mode 100644 index 0000000..1f8d004 --- /dev/null +++ b/internal/metrics/informercache/informercache_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2024 Expedia Group, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package informercache + +import ( + "fmt" + "sync" + "testing" + + "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/metricscommon" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "k8s.io/component-base/metrics/testutil" +) + +func TestRegisterMetrics(t *testing.T) { + registry := prometheus.NewRegistry() + RegisterMetrics(registry) + assert.Equal(t, len(allMetrics), len(descs(registry))) +} + +func TestResetMetrics(t *testing.T) { + SyncTimeout().Inc() + value, _ := testutil.GetCounterMetricValue(SyncTimeout()) + assert.Equal(t, float64(1), value) + ResetMetrics() + + value, _ = testutil.GetCounterMetricValue(SyncTimeout()) + assert.Equal(t, float64(0), value) +} + +func TestPatchSyncPoll(t *testing.T) { + m := SyncPoll().(prometheus.Metric) + assert.Contains( + t, + m.Desc().String(), + fmt.Sprintf("%s_%s_%s", metricscommon.Namespace, Subsystem, syncPollName), + ) +} + +func TestPatchSyncTimeout(t *testing.T) { + m := SyncTimeout() + assert.Contains( + t, + m.Desc().String(), + fmt.Sprintf("%s_%s_%s", metricscommon.Namespace, Subsystem, syncTimeoutName), + ) +} + +func descs(registry *prometheus.Registry) []string { + ch := make(chan *prometheus.Desc) + done := make(chan struct{}) + var ret []string + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + for { + select { + case desc := <-ch: + ret = append(ret, desc.String()) + case <-done: + return + } + } + }() + + registry.Describe(ch) + done <- struct{}{} + wg.Wait() + return ret +} diff --git 
a/internal/metrics/metricscommon/const.go b/internal/metrics/metricscommon/const.go index 711aa34..1ceb822 100644 --- a/internal/metrics/metricscommon/const.go +++ b/internal/metrics/metricscommon/const.go @@ -19,10 +19,9 @@ package metricscommon const ( Namespace = "csa" - ControllerNameLabelName = "controller" - DirectionLabelName = "direction" - OutcomeLabelName = "outcome" - ReasonLabelName = "reason" + DirectionLabelName = "direction" + OutcomeLabelName = "outcome" + ReasonLabelName = "reason" ) // Direction indicates the direction of a scale. diff --git a/internal/metrics/reconciler/reconciler.go b/internal/metrics/reconciler/reconciler.go index a2afec6..ee15c4e 100644 --- a/internal/metrics/reconciler/reconciler.go +++ b/internal/metrics/reconciler/reconciler.go @@ -36,57 +36,55 @@ const ( failureStatesActionName = "failure_states_action" ) -var cName string - var ( skippedOnlyStatusChange = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: skippedOnlyStatusChangeName, Help: "Number of reconciles that were skipped because only the scaler controller status changed", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) existingInProgress = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: existingInProgressName, Help: "Number of attempted reconciles where one was already in progress for the same namespace/name (results in a requeue)", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) failureUnableToGetPod = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: failureUnableToGetPodName, Help: "Number of reconciles where there was a failure to get the pod (results in a requeue)", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) failurePodDoesntExist = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: 
metricscommon.Namespace, Subsystem: Subsystem, Name: failurePodDoesntExistName, Help: "Number of reconciles where the pod was found not to exist (results in failure)", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) failureValidation = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: failureValidationName, Help: "Number of reconciles where there was a failure to validate (results in failure)", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) failureStatesDetermination = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: failureStatesDeterminationName, Help: "Number of reconciles where there was a failure to determine states (results in failure)", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) failureStatesAction = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: failureStatesActionName, Help: "Number of reconciles where there was a failure to action the determined states (results in failure)", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) ) // allMetrics must include all metrics defined above. @@ -95,45 +93,38 @@ var allMetrics = []prometheus.Collector{ failureStatesDetermination, failureStatesAction, } -func RegisterMetrics(registry metrics.RegistererGatherer, controllerName string) { - cName = controllerName +func RegisterMetrics(registry metrics.RegistererGatherer) { registry.MustRegister(allMetrics...) 
} -func UnregisterMetrics(registry metrics.RegistererGatherer) { - for _, metric := range allMetrics { - registry.Unregister(metric) - } -} - func ResetMetrics() { metricscommon.ResetMetrics(allMetrics) } func SkippedOnlyStatusChange() prometheus.Counter { - return skippedOnlyStatusChange.WithLabelValues(cName) + return skippedOnlyStatusChange.WithLabelValues() } func ExistingInProgress() prometheus.Counter { - return existingInProgress.WithLabelValues(cName) + return existingInProgress.WithLabelValues() } func FailureUnableToGetPod() prometheus.Counter { - return failureUnableToGetPod.WithLabelValues(cName) + return failureUnableToGetPod.WithLabelValues() } func FailurePodDoesntExist() prometheus.Counter { - return failurePodDoesntExist.WithLabelValues(cName) + return failurePodDoesntExist.WithLabelValues() } func FailureValidation() prometheus.Counter { - return failureValidation.WithLabelValues(cName) + return failureValidation.WithLabelValues() } func FailureStatesDetermination() prometheus.Counter { - return failureStatesDetermination.WithLabelValues(cName) + return failureStatesDetermination.WithLabelValues() } func FailureStatesAction() prometheus.Counter { - return failureStatesAction.WithLabelValues(cName) + return failureStatesAction.WithLabelValues() } diff --git a/internal/metrics/reconciler/reconciler_test.go b/internal/metrics/reconciler/reconciler_test.go index 5a8fa89..8cfd6b9 100644 --- a/internal/metrics/reconciler/reconciler_test.go +++ b/internal/metrics/reconciler/reconciler_test.go @@ -29,17 +29,10 @@ import ( func TestRegisterMetrics(t *testing.T) { registry := prometheus.NewRegistry() - RegisterMetrics(registry, "") + RegisterMetrics(registry) assert.Equal(t, len(allMetrics), len(descs(registry))) } -func TestUnregisterMetrics(t *testing.T) { - registry := prometheus.NewRegistry() - RegisterMetrics(registry, "") - UnregisterMetrics(registry) - assert.Equal(t, 0, len(descs(registry))) -} - func TestResetMetrics(t *testing.T) { 
SkippedOnlyStatusChange().Inc() value, _ := testutil.GetCounterMetricValue(SkippedOnlyStatusChange()) diff --git a/internal/metrics/registry.go b/internal/metrics/registry.go index e923962..d8c3f8f 100644 --- a/internal/metrics/registry.go +++ b/internal/metrics/registry.go @@ -17,6 +17,7 @@ limitations under the License. package metrics import ( + "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/informercache" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/reconciler" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/retry" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/scale" @@ -24,15 +25,9 @@ import ( ) // RegisterAllMetrics registers all metrics with the supplied registry. -func RegisterAllMetrics(registry metrics.RegistererGatherer, controllerName string) { - reconciler.RegisterMetrics(registry, controllerName) - retry.RegisterKubeApiMetrics(registry, controllerName) - scale.RegisterMetrics(registry, controllerName) -} - -// UnregisterAllMetrics unregisters all metrics within the supplied registry. 
-func UnregisterAllMetrics(registry metrics.RegistererGatherer) { - reconciler.UnregisterMetrics(registry) - retry.UnregisterKubeApiMetrics(registry) - scale.UnregisterMetrics(registry) +func RegisterAllMetrics(registry metrics.RegistererGatherer) { + reconciler.RegisterMetrics(registry) + retry.RegisterKubeApiMetrics(registry) + scale.RegisterMetrics(registry) + informercache.RegisterMetrics(registry) } diff --git a/internal/metrics/registry_test.go b/internal/metrics/registry_test.go index 0ab7707..650e70a 100644 --- a/internal/metrics/registry_test.go +++ b/internal/metrics/registry_test.go @@ -22,6 +22,7 @@ import ( "sync" "testing" + "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/informercache" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/metricscommon" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/reconciler" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/retry" @@ -32,29 +33,19 @@ import ( func TestRegisterAllMetrics(t *testing.T) { registry := prometheus.NewRegistry() - RegisterAllMetrics(registry, "test") + RegisterAllMetrics(registry) - gotReconciler, gotRetryKubeapi, gotScale := gotSubsystems(registry) + gotReconciler, gotRetryKubeapi, gotScale, gotInformerCache := gotSubsystems(registry) assert.True(t, gotReconciler) assert.True(t, gotScale) assert.True(t, gotRetryKubeapi) + assert.True(t, gotInformerCache) } -func TestUnregisterAllMetrics(t *testing.T) { - registry := prometheus.NewRegistry() - RegisterAllMetrics(registry, "") - UnregisterAllMetrics(registry) - - gotReconciler, gotRetryKubeapi, gotScale := gotSubsystems(registry) - assert.False(t, gotReconciler) - assert.False(t, gotScale) - assert.False(t, gotRetryKubeapi) -} - -func gotSubsystems(registry *prometheus.Registry) (bool, bool, bool) { +func gotSubsystems(registry *prometheus.Registry) (bool, bool, bool, bool) { descCh := make(chan *prometheus.Desc) doneCh := make(chan struct{}) - 
gotReconciler, gotRetryKubeapi, gotScale := false, false, false + gotReconciler, gotRetryKubeapi, gotScale, gotInformerCache := false, false, false, false var wg sync.WaitGroup wg.Add(1) @@ -75,6 +66,11 @@ func gotSubsystems(registry *prometheus.Registry) (bool, bool, bool) { if strings.Contains(desc.String(), fmt.Sprintf("%s_%s", metricscommon.Namespace, scale.Subsystem)) { gotScale = true } + + if strings.Contains(desc.String(), fmt.Sprintf("%s_%s", metricscommon.Namespace, informercache.Subsystem)) { + gotInformerCache = true + } + case <-doneCh: return } @@ -84,5 +80,5 @@ func gotSubsystems(registry *prometheus.Registry) (bool, bool, bool) { registry.Describe(descCh) doneCh <- struct{}{} wg.Wait() - return gotReconciler, gotRetryKubeapi, gotScale + return gotReconciler, gotRetryKubeapi, gotScale, gotInformerCache } diff --git a/internal/metrics/retry/kubeapi.go b/internal/metrics/retry/kubeapi.go index 74203e1..9c2aa17 100644 --- a/internal/metrics/retry/kubeapi.go +++ b/internal/metrics/retry/kubeapi.go @@ -30,15 +30,13 @@ const ( retryName = "retry" ) -var cName string - var ( retry = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: SubsystemKubeApi, Name: retryName, Help: "Number of Kube API retries (by reason)", - }, []string{metricscommon.ControllerNameLabelName, metricscommon.ReasonLabelName}) + }, []string{metricscommon.ReasonLabelName}) ) // allMetrics must include all metrics defined above. @@ -46,21 +44,14 @@ var allMetrics = []prometheus.Collector{ retry, } -func RegisterKubeApiMetrics(registry metrics.RegistererGatherer, controllerName string) { - cName = controllerName +func RegisterKubeApiMetrics(registry metrics.RegistererGatherer) { registry.MustRegister(allMetrics...) 
} -func UnregisterKubeApiMetrics(registry metrics.RegistererGatherer) { - for _, metric := range allMetrics { - registry.Unregister(metric) - } -} - func ResetKubeApiMetrics() { metricscommon.ResetMetrics(allMetrics) } func Retry(reason string) prometheus.Counter { - return retry.WithLabelValues(cName, reason) + return retry.WithLabelValues(reason) } diff --git a/internal/metrics/retry/kubeapi_test.go b/internal/metrics/retry/kubeapi_test.go index b08bbb7..374aff7 100644 --- a/internal/metrics/retry/kubeapi_test.go +++ b/internal/metrics/retry/kubeapi_test.go @@ -29,17 +29,10 @@ import ( func TestRegisterKubeApiMetrics(t *testing.T) { registry := prometheus.NewRegistry() - RegisterKubeApiMetrics(registry, "") + RegisterKubeApiMetrics(registry) assert.Equal(t, len(allMetrics), len(descs(registry))) } -func TestUnregisterKubeApiMetrics(t *testing.T) { - registry := prometheus.NewRegistry() - RegisterKubeApiMetrics(registry, "") - UnregisterKubeApiMetrics(registry) - assert.Equal(t, 0, len(descs(registry))) -} - func TestResetKubeApiMetrics(t *testing.T) { Retry("").Inc() value, _ := testutil.GetCounterMetricValue(Retry("")) diff --git a/internal/metrics/scale/scale.go b/internal/metrics/scale/scale.go index 332351c..bdaad43 100644 --- a/internal/metrics/scale/scale.go +++ b/internal/metrics/scale/scale.go @@ -32,22 +32,20 @@ const ( durationName = "duration_seconds" ) -var cName string - var ( failure = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: failureName, Help: "Number of scale failures (by scale direction, reason)", - }, []string{metricscommon.ControllerNameLabelName, metricscommon.DirectionLabelName, metricscommon.ReasonLabelName}) + }, []string{metricscommon.DirectionLabelName, metricscommon.ReasonLabelName}) commandedUnknownRes = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metricscommon.Namespace, Subsystem: Subsystem, Name: commandedUnknownResName, Help: "Number of scales 
commanded upon encountering unknown resources", - }, []string{metricscommon.ControllerNameLabelName}) + }, []string{}) duration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: metricscommon.Namespace, @@ -55,7 +53,7 @@ var ( Name: durationName, Help: "Scale duration (from commanded to enacted) in seconds (by scale direction, outcome)", Buckets: []float64{1, 2, 4, 8, 16, 32, 64, 128}, - }, []string{metricscommon.ControllerNameLabelName, metricscommon.DirectionLabelName, metricscommon.OutcomeLabelName}) + }, []string{metricscommon.DirectionLabelName, metricscommon.OutcomeLabelName}) ) // allMetrics must include all metrics defined above. @@ -63,29 +61,22 @@ var allMetrics = []prometheus.Collector{ failure, commandedUnknownRes, duration, } -func RegisterMetrics(registry metrics.RegistererGatherer, controllerName string) { - cName = controllerName +func RegisterMetrics(registry metrics.RegistererGatherer) { registry.MustRegister(allMetrics...) } -func UnregisterMetrics(registry metrics.RegistererGatherer) { - for _, metric := range allMetrics { - registry.Unregister(metric) - } -} - func ResetMetrics() { metricscommon.ResetMetrics(allMetrics) } func Failure(direction metricscommon.Direction, reason string) prometheus.Counter { - return failure.WithLabelValues(cName, string(direction), reason) + return failure.WithLabelValues(string(direction), reason) } func CommandedUnknownRes() prometheus.Counter { - return commandedUnknownRes.WithLabelValues(cName) + return commandedUnknownRes.WithLabelValues() } func Duration(direction metricscommon.Direction, outcome metricscommon.Outcome) prometheus.Observer { - return duration.WithLabelValues(cName, string(direction), string(outcome)) + return duration.WithLabelValues(string(direction), string(outcome)) } diff --git a/internal/metrics/scale/scale_test.go b/internal/metrics/scale/scale_test.go index f5aca2b..cded559 100644 --- a/internal/metrics/scale/scale_test.go +++ b/internal/metrics/scale/scale_test.go @@ 
-29,17 +29,10 @@ import ( func TestRegisterMetrics(t *testing.T) { registry := prometheus.NewRegistry() - RegisterMetrics(registry, "") + RegisterMetrics(registry) assert.Equal(t, len(allMetrics), len(descs(registry))) } -func TestUnregisterMetrics(t *testing.T) { - registry := prometheus.NewRegistry() - RegisterMetrics(registry, "") - UnregisterMetrics(registry) - assert.Equal(t, 0, len(descs(registry))) -} - func TestResetMetrics(t *testing.T) { Failure("", "").Inc() value, _ := testutil.GetCounterMetricValue(Failure("", "")) diff --git a/internal/pod/containerkubehelper.go b/internal/pod/containerkubehelper.go index 282934d..9019292 100644 --- a/internal/pod/containerkubehelper.go +++ b/internal/pod/containerkubehelper.go @@ -36,7 +36,6 @@ type ContainerKubeHelper interface { Requests(*v1.Container, v1.ResourceName) resource.Quantity Limits(*v1.Container, v1.ResourceName) resource.Quantity ResizePolicy(*v1.Container, v1.ResourceName) (v1.ResourceResizeRestartPolicy, error) - AllocatedResources(*v1.Pod, string, v1.ResourceName) (resource.Quantity, error) CurrentRequests(*v1.Pod, string, v1.ResourceName) (resource.Quantity, error) CurrentLimits(*v1.Pod, string, v1.ResourceName) (resource.Quantity, error) } @@ -153,32 +152,6 @@ func (h containerKubeHelper) ResizePolicy( panic(fmt.Errorf("resourceName '%s' not supported", resourceName)) } -// AllocatedResources returns allocated resources for the supplied containerName and resourceName, from the supplied -// pod. 
-func (h containerKubeHelper) AllocatedResources( - pod *v1.Pod, - containerName string, - resourceName v1.ResourceName, -) (resource.Quantity, error) { - stat, err := h.status(pod, containerName) - if err != nil { - return resource.Quantity{}, common.WrapErrorf(err, "unable to get container status") - } - - if stat.AllocatedResources == nil { - return resource.Quantity{}, NewContainerStatusAllocatedResourcesNotPresentError() - } - - switch resourceName { - case v1.ResourceCPU: - return *stat.AllocatedResources.Cpu(), nil - case v1.ResourceMemory: - return *stat.AllocatedResources.Memory(), nil - } - - panic(fmt.Errorf("resourceName '%s' not supported", resourceName)) -} - // CurrentRequests returns currently enacted requests for the supplied containerName and resourceName, from the // supplied pod. func (h containerKubeHelper) CurrentRequests( diff --git a/internal/pod/containerkubehelper_test.go b/internal/pod/containerkubehelper_test.go index dea9425..5832fcf 100644 --- a/internal/pod/containerkubehelper_test.go +++ b/internal/pod/containerkubehelper_test.go @@ -476,91 +476,6 @@ func TestContainerKubeHelperResizePolicy(t *testing.T) { } } -func TestContainerKubeHelperAllocatedResources(t *testing.T) { - type args struct { - pod *v1.Pod - name string - resourceName v1.ResourceName - } - tests := []struct { - name string - args args - want resource.Quantity - wantErrMsg string - wantPanicErrMsg string - }{ - { - name: "UnableToGetContainerStatus", - args: args{ - pod: &v1.Pod{}, - name: podtest.DefaultContainerName, - resourceName: v1.ResourceCPU, - }, - want: resource.Quantity{}, - wantErrMsg: "unable to get container status", - }, - { - name: "NilAllocatedResources", - args: args{ - - pod: podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)). 
- NilStatusAllocatedResources().Build(), - name: podtest.DefaultContainerName, - resourceName: v1.ResourceCPU, - }, - want: resource.Quantity{}, - wantErrMsg: "container status allocated resources not present", - }, - { - name: "Cpu", - args: args{ - pod: podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), - name: podtest.DefaultContainerName, - resourceName: v1.ResourceCPU, - }, - want: podtest.PodAnnotationCpuStartupQuantity, - }, - { - name: "Memory", - args: args{ - pod: podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), - name: podtest.DefaultContainerName, - resourceName: v1.ResourceMemory, - }, - want: podtest.PodAnnotationMemoryStartupQuantity, - }, - { - name: "ResourceNameNotSupported", - args: args{ - pod: podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), - name: podtest.DefaultContainerName, - resourceName: v1.ResourceConfigMaps, - }, - wantPanicErrMsg: fmt.Sprintf("resourceName '%s' not supported", v1.ResourceConfigMaps), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newContainerKubeHelper() - - if tt.wantPanicErrMsg != "" { - assert.PanicsWithError(t, tt.wantPanicErrMsg, func() { - _, _ = h.AllocatedResources(tt.args.pod, tt.args.name, tt.args.resourceName) - }) - return - } - - got, err := h.AllocatedResources(tt.args.pod, tt.args.name, tt.args.resourceName) - if tt.wantErrMsg != "" { - assert.Contains(t, err.Error(), tt.wantErrMsg) - } else { - assert.Nil(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - func TestContainerKubeHelperCurrentRequests(t *testing.T) { type args struct { pod *v1.Pod diff --git a/internal/pod/error.go b/internal/pod/error.go index 9ec0cd4..a497660 100644 --- a/internal/pod/error.go +++ b/internal/pod/error.go @@ -39,7 +39,7 @@ func NewValidationError(message string, toWrap error) error { func (e 
ValidationError) Error() string { if e.wrapped == nil { - return fmt.Sprintf("validation error: %s", e.message) + return "validation error: " + e.message } return fmt.Errorf("validation error: %s: %w", e.message, e.wrapped).Error() @@ -60,18 +60,6 @@ func (e ContainerStatusNotPresentError) Error() string { return "container status not present" } -// ContainerStatusAllocatedResourcesNotPresentError is an error that indicates container status allocated resources -// is not present. -type ContainerStatusAllocatedResourcesNotPresentError struct{} - -func NewContainerStatusAllocatedResourcesNotPresentError() error { - return ContainerStatusAllocatedResourcesNotPresentError{} -} - -func (e ContainerStatusAllocatedResourcesNotPresentError) Error() string { - return "container status allocated resources not present" -} - // ContainerStatusResourcesNotPresentError is an error that indicates container status resources is not present. type ContainerStatusResourcesNotPresentError struct{} diff --git a/internal/pod/error_test.go b/internal/pod/error_test.go index 615de39..52dbe02 100644 --- a/internal/pod/error_test.go +++ b/internal/pod/error_test.go @@ -52,15 +52,6 @@ func TestContainerStatusNotPresentErrorError(t *testing.T) { assert.Equal(t, "container status not present", e.Error()) } -func TestNewContainerStatusAllocatedResourcesNotPresentError(t *testing.T) { - assert.True(t, errors.As(NewContainerStatusAllocatedResourcesNotPresentError(), &ContainerStatusAllocatedResourcesNotPresentError{})) -} - -func TestContainerStatusAllocatedResourcesNotPresentErrorError(t *testing.T) { - e := NewContainerStatusAllocatedResourcesNotPresentError() - assert.Equal(t, "container status allocated resources not present", e.Error()) -} - func TestNewContainerStatusResourcesNotPresentError(t *testing.T) { assert.True(t, errors.As(NewContainerStatusResourcesNotPresentError(), &ContainerStatusResourcesNotPresentError{})) } diff --git a/internal/pod/kubehelper.go b/internal/pod/kubehelper.go 
index 03d89e8..7f753d6 100644 --- a/internal/pod/kubehelper.go +++ b/internal/pod/kubehelper.go @@ -21,8 +21,11 @@ import ( "errors" "fmt" "strconv" + "time" "github.com/ExpediaGroup/container-startup-autoscaler/internal/common" + "github.com/ExpediaGroup/container-startup-autoscaler/internal/logging" + "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/informercache" "github.com/ExpediaGroup/container-startup-autoscaler/internal/pod/podcommon" "github.com/ExpediaGroup/container-startup-autoscaler/internal/retry" retrygo "github.com/avast/retry-go/v4" @@ -33,10 +36,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + waitForCacheUpdatePollMillis = 100 + waitForCacheUpdateMaxWaitSecs = 3 +) + // KubeHelper performs operations relating to Kube pods. type KubeHelper interface { Get(context.Context, types.NamespacedName) (bool, *v1.Pod, error) - Patch(context.Context, *v1.Pod, func(*v1.Pod) (bool, *v1.Pod, error)) (*v1.Pod, error) + Patch(context.Context, *v1.Pod, func(*v1.Pod) (bool, *v1.Pod, error), bool, bool) (*v1.Pod, error) UpdateContainerResources( context.Context, *v1.Pod, @@ -44,6 +52,7 @@ type KubeHelper interface { resource.Quantity, resource.Quantity, resource.Quantity, resource.Quantity, func(pod *v1.Pod) (bool, *v1.Pod, error), + bool, ) (*v1.Pod, error) HasAnnotation(pod *v1.Pod, name string) (bool, string) ExpectedLabelValueAs(*v1.Pod, string, podcommon.Type) (any, error) @@ -87,15 +96,18 @@ func (h *kubeHelper) Get(ctx context.Context, name types.NamespacedName) (bool, return true, pod, nil } -// Patch patches the supplied pod with mutations dictated by the supplied mutatePodFunc and returns the new server -// representation of the pod. Patches are retried and specially handled if there's a conflict: the latest version is -// retrieved and the patch is reapplied before attempting again. The supplied pod is never mutated. 
+// Patch applies the mutations dictated by mutatePodFunc to either the 'resize' subresource of the supplied pod, or the +// pod itself. If mustSyncCache is true, it waits for the patched pod to be updated in the informer cache. It returns +// the new server representation of the pod. Patches are retried and specially handled if there's a conflict: the +// latest version is retrieved and the patch is reapplied before attempting again. The supplied pod is never mutated. func (h *kubeHelper) Patch( ctx context.Context, pod *v1.Pod, - mutatePodFunc func(*v1.Pod) (bool, *v1.Pod, error), + podMutationFunc func(*v1.Pod) (bool, *v1.Pod, error), + patchResize bool, + mustSyncCache bool, ) (*v1.Pod, error) { - shouldPatch, mutatedPod, err := mutatePodFunc(pod) + shouldPatch, mutatedPod, err := podMutationFunc(pod.DeepCopy()) if err != nil { return nil, common.WrapErrorf(err, "unable to mutate pod") } @@ -104,7 +116,13 @@ func (h *kubeHelper) Patch( } retryableFunc := func() error { - if err = h.client.Patch(ctx, mutatedPod, client.MergeFrom(pod)); err != nil { + if patchResize { + err = h.client.SubResource("resize").Patch(ctx, mutatedPod, client.MergeFrom(pod)) + } else { + err = h.client.Patch(ctx, mutatedPod, client.MergeFrom(pod)) + } + + if err != nil { if kerrors.IsConflict(err) { // Get latest pod and re-apply patch for next attempt. exists, latestPod, getErr := h.Get(ctx, types.NamespacedName{ @@ -119,7 +137,7 @@ func (h *kubeHelper) Patch( return retrygo.Unrecoverable(errors.New("pod doesn't exist when resolving conflict")) } - _, mutatedPod, _ = mutatePodFunc(latestPod) + _, mutatedPod, _ = podMutationFunc(latestPod) } return err @@ -133,25 +151,34 @@ func (h *kubeHelper) Patch( return nil, common.WrapErrorf(err, "unable to patch pod") } + if mustSyncCache { + // Wait for the patched pod to be updated in the informer cache. 
For example, this is necessary when updating + // the status annotation since the cache may not be updated immediately upon the next reconciliation, leading + // to inaccurate status updates that rely on accurate current status. The reconciler doesn't allow concurrent + // reconciles for same pod so subsequent reconciles will not start until this wait has completed. + _ = h.waitForCacheUpdate(ctx, mutatedPod) + } + return mutatedPod, nil } // UpdateContainerResources updates the resources (requests and limits) of the supplied containerName within the -// supplied pod. Optional additional mutations may be supplied via addMutations. The update is serviced via a patch, -// which behaves per Patch. The supplied pod is never mutated. Returns the new server representation of the pod. +// supplied pod. Optional additional mutations may be supplied via addPodMutationFunc, with the option to wait for +// these mutations to reflect in the pod informer cache via addPodMutationMustSyncCache. The update is serviced via a +// patch, which behaves per Patch. The supplied pod is never mutated. Returns the new server representation of the pod. 
func (h *kubeHelper) UpdateContainerResources( ctx context.Context, pod *v1.Pod, containerName string, cpuRequests resource.Quantity, cpuLimits resource.Quantity, memoryRequests resource.Quantity, memoryLimits resource.Quantity, - addMutations func(pod *v1.Pod) (bool, *v1.Pod, error), + addPodMutationFunc func(pod *v1.Pod) (bool, *v1.Pod, error), + addPodMutationMustSyncCache bool, ) (*v1.Pod, error) { - mutatePodFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { - mutatedPod := pod.DeepCopy() + mutateResizeFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { var container *v1.Container - for _, c := range mutatedPod.Spec.Containers { + for _, c := range pod.Spec.Containers { if c.Name == containerName { container = &c break @@ -166,21 +193,19 @@ func (h *kubeHelper) UpdateContainerResources( container.Resources.Requests[v1.ResourceMemory] = memoryRequests container.Resources.Limits[v1.ResourceMemory] = memoryLimits - if addMutations != nil { - var err error - // 'Should patch' ignored here as supplementary to patching resources. 
- _, mutatedPod, err = addMutations(mutatedPod) - if err != nil { - return false, nil, common.WrapErrorf(err, "unable to apply additional pod mutations") - } - } - - return true, mutatedPod, nil + return true, pod, nil } - newPod, err := h.Patch(ctx, pod, mutatePodFunc) + newPod, err := h.Patch(ctx, pod, mutateResizeFunc, true, false) if err != nil { - return nil, common.WrapErrorf(err, "unable to patch pod") + return nil, common.WrapErrorf(err, "unable to patch pod resize subresource") + } + + if addPodMutationFunc != nil { + newPod, err = h.Patch(ctx, newPod, addPodMutationFunc, false, addPodMutationMustSyncCache) + if err != nil { + return nil, common.WrapErrorf(err, "unable to patch pod additional mutations") + } } return newPod, nil @@ -251,3 +276,30 @@ func (h *kubeHelper) expectedLabelOrAnnotationAs( panic(fmt.Errorf("as '%s' not supported", as)) } + +// waitForCacheUpdate waits for the informer cache to update a pod with at least the resource version indicated by the +// supplied pod. Returns the new representation of the pod if found within a timeout period, otherwise nil. 
+func (h *kubeHelper) waitForCacheUpdate(ctx context.Context, pod *v1.Pod) *v1.Pod { + ticker := time.NewTicker(waitForCacheUpdatePollMillis * time.Millisecond) + defer ticker.Stop() + timeout := time.After(waitForCacheUpdateMaxWaitSecs * time.Second) + + pollCount := 0 + for { + select { + case <-ticker.C: + pollCount++ + exists, podFromCache, err := h.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}) + if err == nil && exists && podFromCache.ResourceVersion >= pod.ResourceVersion { + logging.Infof(ctx, logging.VTrace, "pod polled from cache %d time(s) in total", pollCount) + informercache.SyncPoll().Observe(float64(pollCount)) + return podFromCache + } + + case <-timeout: + logging.Infof(ctx, logging.VDebug, "cache wasn't updated in time") + informercache.SyncTimeout().Inc() + return nil + } + } +} diff --git a/internal/pod/kubehelper_test.go b/internal/pod/kubehelper_test.go index ab4941d..0ddaeef 100644 --- a/internal/pod/kubehelper_test.go +++ b/internal/pod/kubehelper_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/ExpediaGroup/container-startup-autoscaler/internal/context/contexttest" + "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/informercache" "github.com/ExpediaGroup/container-startup-autoscaler/internal/metrics/retry" "github.com/ExpediaGroup/container-startup-autoscaler/internal/pod/podcommon" "github.com/ExpediaGroup/container-startup-autoscaler/internal/pod/podtest" @@ -63,7 +64,7 @@ func TestKubeHelperGet(t *testing.T) { { name: "UnableToGetPod", client: podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{Get: podtest.InterceptorFuncGetFail()} }, ), args: args{ @@ -77,7 +78,7 @@ func TestKubeHelperGet(t *testing.T) { { name: "NotFound", client: podtest.ControllerRuntimeFakeClientWithKubeFake( - func() 
*kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{} }, ), args: args{ @@ -91,7 +92,7 @@ func TestKubeHelperGet(t *testing.T) { name: "Found", client: podtest.ControllerRuntimeFakeClientWithKubeFake( func() *kubefake.Clientset { - return kubefake.NewSimpleClientset( + return kubefake.NewClientset( podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), ) }, @@ -124,7 +125,7 @@ func TestKubeHelperGet(t *testing.T) { func TestKubeHelperPatch(t *testing.T) { t.Run("UnableToMutatePod", func(t *testing.T) { h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{} }, )) @@ -132,6 +133,8 @@ func TestKubeHelperPatch(t *testing.T) { contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), &v1.Pod{}, func(pod *v1.Pod) (bool, *v1.Pod, error) { return false, nil, errors.New("") }, + false, + false, ) assert.Nil(t, got) assert.Contains(t, err.Error(), "unable to mutate pod") @@ -139,14 +142,16 @@ func TestKubeHelperPatch(t *testing.T) { t.Run("UnableToPatchPod", func(t *testing.T) { h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{Patch: podtest.InterceptorFuncPatchFail()} }, )) got, err := h.Patch( contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), &v1.Pod{}, - func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, pod.DeepCopy(), nil }, + func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, pod, nil }, + false, + false, ) assert.Nil(t, got) 
assert.Contains(t, err.Error(), "unable to patch pod") @@ -155,7 +160,7 @@ func TestKubeHelperPatch(t *testing.T) { t.Run("ConflictUnableToGetPod", func(t *testing.T) { conflictErr := kerrors.NewConflict(schema.GroupResource{}, "", errors.New("")) h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{ Patch: podtest.InterceptorFuncPatchFail(conflictErr), @@ -167,7 +172,9 @@ func TestKubeHelperPatch(t *testing.T) { got, err := h.Patch( contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), &v1.Pod{}, - func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, pod.DeepCopy(), nil }, + func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, pod, nil }, + false, + false, ) assert.Nil(t, got) assert.Contains(t, err.Error(), "unable to get pod when resolving conflict") @@ -177,7 +184,7 @@ func TestKubeHelperPatch(t *testing.T) { conflictErr := kerrors.NewConflict(schema.GroupResource{}, "", errors.New("")) notFoundErr := kerrors.NewNotFound(schema.GroupResource{}, "") h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{ Patch: podtest.InterceptorFuncPatchFail(conflictErr), @@ -189,35 +196,42 @@ func TestKubeHelperPatch(t *testing.T) { got, err := h.Patch( contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), &v1.Pod{}, - func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, pod.DeepCopy(), nil }, + func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, pod, nil }, + false, + false, ) assert.Nil(t, got) assert.Contains(t, err.Error(), "pod doesn't exist when resolving conflict") }) - t.Run("OkWithoutConflict", func(t *testing.T) { + 
t.Run("OkNoPatchResizeTrue", func(t *testing.T) { cpuRequests, cpuLimits := resource.MustParse("89998m"), resource.MustParse("99999m") memoryRequests, memoryLimits := resource.MustParse("89998M"), resource.MustParse("99999M") pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() - mutatePodFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { - mutatedPod := pod.DeepCopy() - mutatedPod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = cpuRequests - mutatedPod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = cpuLimits - mutatedPod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = memoryRequests - mutatedPod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = memoryLimits - return true, mutatedPod, nil + podMutationFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { + pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = cpuRequests + pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = cpuLimits + pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = memoryRequests + pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = memoryLimits + return false, pod, nil } h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset(pod) }, + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, func() interceptor.Funcs { return interceptor.Funcs{} }, )) - got, err := h.Patch(contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), pod, mutatePodFunc) + got, err := h.Patch( + contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), + pod, + podMutationFunc, + true, + true, + ) assert.Nil(t, err) - assert.True(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) - assert.True(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceCPU].Equal(cpuLimits)) - assert.True(t, 
got.Spec.Containers[0].Resources.Requests[v1.ResourceMemory].Equal(memoryRequests)) - assert.True(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceMemory].Equal(memoryLimits)) + assert.False(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) + assert.False(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceCPU].Equal(cpuLimits)) + assert.False(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceMemory].Equal(memoryRequests)) + assert.False(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceMemory].Equal(memoryLimits)) // Ensure original pod isn't mutated assert.False(t, pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) @@ -226,28 +240,33 @@ func TestKubeHelperPatch(t *testing.T) { assert.False(t, pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory].Equal(memoryLimits)) }) - t.Run("OkWithResolvedConflict", func(t *testing.T) { + t.Run("OkWithResolvedConflictResizeTrue", func(t *testing.T) { cpuRequests, cpuLimits := resource.MustParse("89998m"), resource.MustParse("99999m") memoryRequests, memoryLimits := resource.MustParse("89998M"), resource.MustParse("99999M") pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() - mutatePodFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { - mutatedPod := pod.DeepCopy() - mutatedPod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = cpuRequests - mutatedPod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = cpuLimits - mutatedPod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = memoryRequests - mutatedPod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = memoryLimits - return true, mutatedPod, nil + podMutationFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { + pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = cpuRequests + pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = cpuLimits + pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] 
= memoryRequests + pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = memoryLimits + return true, pod, nil } conflictErr := kerrors.NewConflict(schema.GroupResource{}, "", errors.New("")) h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset(pod) }, + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, func() interceptor.Funcs { - return interceptor.Funcs{Patch: podtest.InterceptorFuncPatchFailFirstOnly(conflictErr)} + return interceptor.Funcs{SubResourcePatch: podtest.InterceptorFuncSubResourcePatchFailFirstOnly(conflictErr)} }, )) beforeMetricVal, _ := testutil.GetCounterMetricValue(retry.Retry(strings.ToLower(string(metav1.StatusReasonConflict)))) - got, err := h.Patch(contexttest.NewCtxBuilder(contexttest.NewOneRetryCtxConfig(nil)).Build(), pod, mutatePodFunc) + got, err := h.Patch( + contexttest.NewCtxBuilder(contexttest.NewOneRetryCtxConfig(nil)).Build(), + pod, + podMutationFunc, + true, + true, + ) assert.Nil(t, err) assert.True(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) assert.True(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceCPU].Equal(cpuLimits)) @@ -257,29 +276,34 @@ func TestKubeHelperPatch(t *testing.T) { assert.Equal(t, beforeMetricVal+1, afterMetricVal) }) - t.Run("OkNoPatch", func(t *testing.T) { + t.Run("OkWithoutConflictResizeTrue", func(t *testing.T) { cpuRequests, cpuLimits := resource.MustParse("89998m"), resource.MustParse("99999m") memoryRequests, memoryLimits := resource.MustParse("89998M"), resource.MustParse("99999M") pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() - mutatePodFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { - mutatedPod := pod.DeepCopy() - mutatedPod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = cpuRequests - mutatedPod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = cpuLimits - 
mutatedPod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = memoryRequests - mutatedPod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = memoryLimits - return false, mutatedPod, nil + podMutationFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { + pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = cpuRequests + pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = cpuLimits + pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = memoryRequests + pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = memoryLimits + return true, pod, nil } h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset(pod) }, + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, func() interceptor.Funcs { return interceptor.Funcs{} }, )) - got, err := h.Patch(contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), pod, mutatePodFunc) + got, err := h.Patch( + contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), + pod, + podMutationFunc, + true, + true, + ) assert.Nil(t, err) - assert.False(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) - assert.False(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceCPU].Equal(cpuLimits)) - assert.False(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceMemory].Equal(memoryRequests)) - assert.False(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceMemory].Equal(memoryLimits)) + assert.True(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) + assert.True(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceCPU].Equal(cpuLimits)) + assert.True(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceMemory].Equal(memoryRequests)) + assert.True(t, got.Spec.Containers[0].Resources.Limits[v1.ResourceMemory].Equal(memoryLimits)) // Ensure original pod isn't mutated assert.False(t, 
pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) @@ -287,6 +311,32 @@ func TestKubeHelperPatch(t *testing.T) { assert.False(t, pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory].Equal(memoryRequests)) assert.False(t, pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory].Equal(memoryLimits)) }) + + t.Run("OkWithoutConflictResizeFalse", func(t *testing.T) { + pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() + podMutationFunc := func(pod *v1.Pod) (bool, *v1.Pod, error) { + pod.Annotations["test"] = "test" + return true, pod, nil + } + h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, + func() interceptor.Funcs { return interceptor.Funcs{} }, + )) + + got, err := h.Patch( + contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), + pod, + podMutationFunc, + false, + false, + ) + assert.Nil(t, err) + assert.Equal(t, "test", got.Annotations["test"]) + + // Ensure original pod isn't mutated + _, gotAnn := pod.Annotations["test"] + assert.False(t, gotAnn) + }) } func TestKubeHelperUpdateContainerResources(t *testing.T) { @@ -300,13 +350,19 @@ func TestKubeHelperUpdateContainerResources(t *testing.T) { resource.Quantity{}, resource.Quantity{}, resource.Quantity{}, resource.Quantity{}, nil, + false, ) assert.Nil(t, got) assert.Contains(t, err.Error(), "container not present") }) - t.Run("UnableToApplyAdditionalPodMutations", func(t *testing.T) { - h := newKubeHelper(nil) + t.Run("UnableToPatchPodResizeSubresource", func(t *testing.T) { + h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( + func() *kubefake.Clientset { return kubefake.NewClientset() }, + func() interceptor.Funcs { + return interceptor.Funcs{SubResourcePatch: podtest.InterceptorFuncSubResourcePatchFail()} + }, + )) got, err := h.UpdateContainerResources( 
contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), @@ -314,30 +370,35 @@ func TestKubeHelperUpdateContainerResources(t *testing.T) { podtest.DefaultContainerName, resource.Quantity{}, resource.Quantity{}, resource.Quantity{}, resource.Quantity{}, - func(pod *v1.Pod) (bool, *v1.Pod, error) { - return false, nil, errors.New("") - }, + nil, + false, ) assert.Nil(t, got) - assert.Contains(t, err.Error(), "unable to apply additional pod mutations") + assert.Contains(t, err.Error(), "unable to patch pod resize subresource") }) - t.Run("UnableToPatchPod", func(t *testing.T) { + t.Run("UnableToApplyAdditionalPodMutations", func(t *testing.T) { + pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() + cpuRequests, cpuLimits := resource.MustParse("89998m"), resource.MustParse("99999m") + memoryRequests, memoryLimits := resource.MustParse("89998M"), resource.MustParse("99999M") h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, - func() interceptor.Funcs { return interceptor.Funcs{Patch: podtest.InterceptorFuncPatchFail()} }, + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, + func() interceptor.Funcs { return interceptor.Funcs{} }, )) got, err := h.UpdateContainerResources( contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), - podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), + pod, podtest.DefaultContainerName, - resource.Quantity{}, resource.Quantity{}, - resource.Quantity{}, resource.Quantity{}, - nil, + cpuRequests, cpuLimits, + memoryRequests, memoryLimits, + func(pod *v1.Pod) (bool, *v1.Pod, error) { + return false, nil, errors.New("") + }, + false, ) assert.Nil(t, got) - assert.Contains(t, err.Error(), "unable to patch pod") + assert.Contains(t, err.Error(), "unable to patch pod additional 
mutations") }) t.Run("Ok", func(t *testing.T) { @@ -345,7 +406,7 @@ func TestKubeHelperUpdateContainerResources(t *testing.T) { cpuRequests, cpuLimits := resource.MustParse("89998m"), resource.MustParse("99999m") memoryRequests, memoryLimits := resource.MustParse("89998M"), resource.MustParse("99999M") h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset(pod) }, + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, func() interceptor.Funcs { return interceptor.Funcs{} }, )) @@ -359,6 +420,7 @@ func TestKubeHelperUpdateContainerResources(t *testing.T) { pod.Annotations["test"] = "test" return true, pod, nil }, + false, ) assert.Nil(t, err) assert.True(t, got.Spec.Containers[0].Resources.Requests[v1.ResourceCPU].Equal(cpuRequests)) @@ -464,7 +526,7 @@ func TestKubeHelperExpectedLabelValueAs(t *testing.T) { name: "test", as: "test", }, - wantPanicErrMsg: fmt.Sprintf("as 'test' not supported"), + wantPanicErrMsg: "as 'test' not supported", }, { name: "Ok", @@ -584,3 +646,41 @@ func TestKubeHelperResizeStatus(t *testing.T) { got := h.ResizeStatus(pod) assert.Equal(t, v1.PodResizeStatusInProgress, got) } + +func TestKubeHelperWaitForCacheUpdate(t *testing.T) { + t.Run("Ok", func(t *testing.T) { + pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() + pod.ResourceVersion = "123" + h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, + func() interceptor.Funcs { return interceptor.Funcs{} }, + )) + + beforeMetricVal, _ := testutil.GetHistogramMetricValue(informercache.SyncPoll()) + newPod := h.waitForCacheUpdate( + contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), + pod, + ) + assert.NotNil(t, newPod) + afterMetricVal, _ := testutil.GetHistogramMetricValue(informercache.SyncPoll()) + assert.Equal(t, 
beforeMetricVal+1, afterMetricVal) + }) + + t.Run("Timeout", func(t *testing.T) { + pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() + pod.ResourceVersion = "123" + h := newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( + func() *kubefake.Clientset { return kubefake.NewClientset(&v1.Pod{}) }, + func() interceptor.Funcs { return interceptor.Funcs{} }, + )) + + beforeMetricVal, _ := testutil.GetCounterMetricValue(informercache.SyncTimeout()) + newPod := h.waitForCacheUpdate( + contexttest.NewCtxBuilder(contexttest.NewNoRetryCtxConfig(nil)).Build(), + pod, + ) + assert.Nil(t, newPod) + afterMetricVal, _ := testutil.GetCounterMetricValue(informercache.SyncTimeout()) + assert.Equal(t, beforeMetricVal+1, afterMetricVal) + }) +} diff --git a/internal/pod/podcommon/stateconst.go b/internal/pod/podcommon/stateconst.go index 671d004..9037294 100644 --- a/internal/pod/podcommon/stateconst.go +++ b/internal/pod/podcommon/stateconst.go @@ -77,23 +77,6 @@ func (s StateResources) HumanReadable() string { } } -// StateAllocatedResources indicates the state of a Kube container's allocated resources. -type StateAllocatedResources string - -const ( - // StateAllocatedResourcesIncomplete indicates allocated resources are incomplete. - StateAllocatedResourcesIncomplete StateAllocatedResources = "incomplete" - - // StateAllocatedResourcesContainerRequestsMatch indicates allocated resources match container requests. - StateAllocatedResourcesContainerRequestsMatch StateAllocatedResources = "containerrequestsmatch" - - // StateAllocatedResourcesContainerRequestsMismatch indicates allocated resources don't match container requests. - StateAllocatedResourcesContainerRequestsMismatch StateAllocatedResources = "containerrequestsmismatch" - - // StateAllocatedResourcesUnknown indicates allocated resources are unknown. 
- StateAllocatedResourcesUnknown StateAllocatedResources = "unknown" -) - // StateStatusResources indicates the state of a Kube container's status resources. type StateStatusResources string diff --git a/internal/pod/podcommon/states.go b/internal/pod/podcommon/states.go index e7bf979..03e8d6f 100644 --- a/internal/pod/podcommon/states.go +++ b/internal/pod/podcommon/states.go @@ -18,14 +18,13 @@ package podcommon // States holds information related to the current state of the target container. type States struct { - StartupProbe StateBool `json:"startupProbe"` - ReadinessProbe StateBool `json:"readinessProbe"` - Container StateContainer `json:"container"` - Started StateBool `json:"started"` - Ready StateBool `json:"ready"` - Resources StateResources `json:"resources"` - AllocatedResources StateAllocatedResources `json:"allocatedResources"` - StatusResources StateStatusResources `json:"statusResources"` + StartupProbe StateBool `json:"startupProbe"` + ReadinessProbe StateBool `json:"readinessProbe"` + Container StateContainer `json:"container"` + Started StateBool `json:"started"` + Ready StateBool `json:"ready"` + Resources StateResources `json:"resources"` + StatusResources StateStatusResources `json:"statusResources"` } func NewStates( @@ -35,30 +34,27 @@ func NewStates( started StateBool, ready StateBool, stateResources StateResources, - stateAllocatedResources StateAllocatedResources, stateStatusResources StateStatusResources, ) States { return States{ - StartupProbe: startupProbe, - ReadinessProbe: readinessProbe, - Container: stateContainer, - Started: started, - Ready: ready, - Resources: stateResources, - AllocatedResources: stateAllocatedResources, - StatusResources: stateStatusResources, + StartupProbe: startupProbe, + ReadinessProbe: readinessProbe, + Container: stateContainer, + Started: started, + Ready: ready, + Resources: stateResources, + StatusResources: stateStatusResources, } } func NewStatesAllUnknown() States { return States{ - StartupProbe: 
StateBoolUnknown, - ReadinessProbe: StateBoolUnknown, - Container: StateContainerUnknown, - Started: StateBoolUnknown, - Ready: StateBoolUnknown, - Resources: StateResourcesUnknown, - AllocatedResources: StateAllocatedResourcesUnknown, - StatusResources: StateStatusResourcesUnknown, + StartupProbe: StateBoolUnknown, + ReadinessProbe: StateBoolUnknown, + Container: StateContainerUnknown, + Started: StateBoolUnknown, + Ready: StateBoolUnknown, + Resources: StateResourcesUnknown, + StatusResources: StateStatusResourcesUnknown, } } diff --git a/internal/pod/podcommon/states_test.go b/internal/pod/podcommon/states_test.go index 873194d..d8a009e 100644 --- a/internal/pod/podcommon/states_test.go +++ b/internal/pod/podcommon/states_test.go @@ -30,7 +30,6 @@ func TestNewStates(t *testing.T) { StateBoolUnknown, StateBoolUnknown, StateResourcesUnknown, - StateAllocatedResourcesUnknown, StateStatusResourcesUnknown, ) assert.Equal(t, StateBoolUnknown, s.StartupProbe) @@ -39,7 +38,6 @@ func TestNewStates(t *testing.T) { assert.Equal(t, StateBoolUnknown, s.Started) assert.Equal(t, StateBoolUnknown, s.Ready) assert.Equal(t, StateResourcesUnknown, s.Resources) - assert.Equal(t, StateAllocatedResourcesUnknown, s.AllocatedResources) assert.Equal(t, StateStatusResourcesUnknown, s.StatusResources) } @@ -51,6 +49,5 @@ func TestNewStatesAllUnknown(t *testing.T) { assert.Equal(t, StateBoolUnknown, s.Started) assert.Equal(t, StateBoolUnknown, s.Ready) assert.Equal(t, StateResourcesUnknown, s.Resources) - assert.Equal(t, StateAllocatedResourcesUnknown, s.AllocatedResources) assert.Equal(t, StateStatusResourcesUnknown, s.StatusResources) } diff --git a/internal/pod/podcommon/statusannotation_test.go b/internal/pod/podcommon/statusannotation_test.go index ffba7c8..3c710b8 100644 --- a/internal/pod/podcommon/statusannotation_test.go +++ b/internal/pod/podcommon/statusannotation_test.go @@ -49,14 +49,14 @@ func TestNewStatusAnnotation(t *testing.T) { func TestStatusAnnotationJson(t *testing.T) 
{ j := NewStatusAnnotation( "status", - NewStates("1", "2", "3", "4", "5", "6", "7", "8"), + NewStates("1", "2", "3", "4", "5", "6", "7"), NewStatusAnnotationScale("lastCommanded", "lastEnacted", "lastFailed"), "lastUpdated", ).Json() assert.Equal( t, "{\"status\":\"status\","+ - "\"states\":{\"startupProbe\":\"1\",\"readinessProbe\":\"2\",\"container\":\"3\",\"started\":\"4\",\"ready\":\"5\",\"resources\":\"6\",\"allocatedResources\":\"7\",\"statusResources\":\"8\"},"+ + "\"states\":{\"startupProbe\":\"1\",\"readinessProbe\":\"2\",\"container\":\"3\",\"started\":\"4\",\"ready\":\"5\",\"resources\":\"6\",\"statusResources\":\"7\"},"+ "\"scale\":{\"lastCommanded\":\"lastCommanded\",\"lastEnacted\":\"lastEnacted\",\"lastFailed\":\"lastFailed\"},"+ "\"lastUpdated\":\"lastUpdated\"}", j, @@ -116,7 +116,7 @@ func TestStatusAnnotationFromString(t *testing.T) { t.Run("Ok", func(t *testing.T) { got, err := StatusAnnotationFromString( "{\"status\":\"status\"," + - "\"states\":{\"startupProbe\":\"1\",\"readinessProbe\":\"2\",\"container\":\"3\",\"started\":\"4\",\"ready\":\"5\",\"resources\":\"6\",\"allocatedResources\":\"7\",\"statusResources\":\"8\"}," + + "\"states\":{\"startupProbe\":\"1\",\"readinessProbe\":\"2\",\"container\":\"3\",\"started\":\"4\",\"ready\":\"5\",\"resources\":\"6\",\"statusResources\":\"7\"}," + "\"scale\":{\"lastCommanded\":\"lastCommanded\",\"lastEnacted\":\"lastEnacted\",\"lastFailed\":\"lastFailed\"}," + "\"lastUpdated\":\"lastUpdated\"}", ) @@ -125,7 +125,7 @@ func TestStatusAnnotationFromString(t *testing.T) { t, NewStatusAnnotation( "status", - NewStates("1", "2", "3", "4", "5", "6", "7", "8"), + NewStates("1", "2", "3", "4", "5", "6", "7"), NewStatusAnnotationScale("lastCommanded", "lastEnacted", "lastFailed"), "lastUpdated", ), diff --git a/internal/pod/podtest/mockcontainerkubehelper.go b/internal/pod/podtest/mockcontainerkubehelper.go index 4d7d5bd..5278a23 100644 --- a/internal/pod/podtest/mockcontainerkubehelper.go +++ 
b/internal/pod/podtest/mockcontainerkubehelper.go @@ -81,15 +81,6 @@ func (m *MockContainerKubeHelper) ResizePolicy( return args.Get(0).(v1.ResourceResizeRestartPolicy), args.Error(1) } -func (m *MockContainerKubeHelper) AllocatedResources( - pod *v1.Pod, - containerName string, - resourceName v1.ResourceName, -) (resource.Quantity, error) { - args := m.Called(pod, containerName, resourceName) - return args.Get(0).(resource.Quantity), args.Error(1) -} - func (m *MockContainerKubeHelper) CurrentRequests( pod *v1.Pod, containerName string, @@ -146,11 +137,6 @@ func (m *MockContainerKubeHelper) ResizePolicyDefault() { m.On("ResizePolicy", mock.Anything, mock.Anything).Return(v1.NotRequired, nil) } -func (m *MockContainerKubeHelper) AllocatedResourcesDefault() { - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceCPU).Return(MockDefaultCpuQuantity, nil) - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceMemory).Return(MockDefaultMemoryQuantity, nil) -} - func (m *MockContainerKubeHelper) CurrentRequestsDefault() { m.On("CurrentRequests", mock.Anything, mock.Anything, v1.ResourceCPU).Return(MockDefaultCpuQuantity, nil) m.On("CurrentRequests", mock.Anything, mock.Anything, v1.ResourceMemory).Return(MockDefaultMemoryQuantity, nil) diff --git a/internal/pod/podtest/mockkubehelper.go b/internal/pod/podtest/mockkubehelper.go index 862ac07..f354b06 100644 --- a/internal/pod/podtest/mockkubehelper.go +++ b/internal/pod/podtest/mockkubehelper.go @@ -47,8 +47,10 @@ func (m *MockKubeHelper) Patch( ctx context.Context, originalPod *v1.Pod, mutatePodFunc func(*v1.Pod) (bool, *v1.Pod, error), + patchResize bool, + mustSyncCache bool, ) (*v1.Pod, error) { - args := m.Called(ctx, originalPod, mutatePodFunc) + args := m.Called(ctx, originalPod, mutatePodFunc, patchResize, mustSyncCache) return args.Get(0).(*v1.Pod), args.Error(1) } @@ -58,9 +60,10 @@ func (m *MockKubeHelper) UpdateContainerResources( containerName string, cpuRequests 
resource.Quantity, cpuLimits resource.Quantity, memoryRequests resource.Quantity, memoryLimits resource.Quantity, - addMutations func(pod *v1.Pod) (bool, *v1.Pod, error), + addPodMutationFunc func(pod *v1.Pod) (bool, *v1.Pod, error), + addPodMutationMustSyncCache bool, ) (*v1.Pod, error) { - args := m.Called(ctx, pod, containerName, cpuRequests, cpuLimits, memoryRequests, memoryLimits, addMutations) + args := m.Called(ctx, pod, containerName, cpuRequests, cpuLimits, memoryRequests, memoryLimits, addPodMutationFunc, addPodMutationMustSyncCache) return args.Get(0).(*v1.Pod), args.Error(1) } @@ -94,11 +97,11 @@ func (m *MockKubeHelper) GetDefault() { } func (m *MockKubeHelper) PatchDefault() { - m.On("Patch", mock.Anything, mock.Anything, mock.Anything).Return(&v1.Pod{}, nil) + m.On("Patch", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&v1.Pod{}, nil) } func (m *MockKubeHelper) UpdateContainerResourcesDefault() { - m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(&v1.Pod{}, nil) } diff --git a/internal/pod/podtest/mockstatus.go b/internal/pod/podtest/mockstatus.go index 7804f2f..7ae7804 100644 --- a/internal/pod/podtest/mockstatus.go +++ b/internal/pod/podtest/mockstatus.go @@ -52,7 +52,7 @@ func (m *MockStatus) Update( return args.Get(0).(*v1.Pod), args.Error(1) } -func (m *MockStatus) UpdateMutatePodFunc( +func (m *MockStatus) PodMutationFunc( ctx context.Context, status string, states podcommon.States, @@ -73,16 +73,16 @@ func (m *MockStatus) UpdateDefaultAndRun(run func()) { Run(func(args mock.Arguments) { run() }) } -func (m *MockStatus) UpdateMutatePodFuncDefault() { - m.On("UpdateMutatePodFunc", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( +func (m *MockStatus) PodMutationFuncDefault() { + m.On("PodMutationFunc", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, &v1.Pod{}, nil }, ) } -func (m *MockStatus) UpdateMutatePodFuncDefaultAndRun(run func()) { - m.On("UpdateMutatePodFunc", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( +func (m *MockStatus) PodMutationFuncDefaultAndRun(run func()) { + m.On("PodMutationFunc", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( func(pod *v1.Pod) (bool, *v1.Pod, error) { return true, &v1.Pod{}, nil }, diff --git a/internal/pod/podtest/mocktargetcontainerstate.go b/internal/pod/podtest/mocktargetcontainerstate.go index 87a43d5..4de825c 100644 --- a/internal/pod/podtest/mocktargetcontainerstate.go +++ b/internal/pod/podtest/mocktargetcontainerstate.go @@ -53,7 +53,6 @@ func (m *MockTargetContainerState) StatesDefault() { podcommon.StateBoolTrue, podcommon.StateBoolTrue, podcommon.StateResourcesStartup, - podcommon.StateAllocatedResourcesContainerRequestsMatch, podcommon.StateStatusResourcesContainerResourcesMatch, ), nil, diff --git a/internal/pod/podtest/pod.go b/internal/pod/podtest/pod.go index ae30a89..8d2c82d 100644 --- 
a/internal/pod/podtest/pod.go +++ b/internal/pod/podtest/pod.go @@ -71,28 +71,26 @@ var ( // podConfig holds configuration for generating a test pod. type podConfig struct { - namespace string - name string - labelEnabledValue string - annotationTargetContainerName string - annotationCpuStartup string - annotationCpuPostStartupRequests string - annotationCpuPostStartupLimits string - annotationMemoryStartup string - annotationMemoryPostStartupRequests string - annotationMemoryPostStartupLimits string - statusContainerName string - statusContainerState corev1.ContainerState - statusContainerStarted bool - statusContainerReady bool - statusContainerAllocatedResourcesCpu string - statusContainerAllocatedResourcesMemory string - statusContainerResourcesCpuRequests string - statusContainerResourcesCpuLimits string - statusContainerResourcesMemoryRequests string - statusContainerResourcesMemoryLimits string - statusResize corev1.PodResizeStatus - containerConfig containerConfig + namespace string + name string + labelEnabledValue string + annotationTargetContainerName string + annotationCpuStartup string + annotationCpuPostStartupRequests string + annotationCpuPostStartupLimits string + annotationMemoryStartup string + annotationMemoryPostStartupRequests string + annotationMemoryPostStartupLimits string + statusContainerName string + statusContainerState corev1.ContainerState + statusContainerStarted bool + statusContainerReady bool + statusContainerResourcesCpuRequests string + statusContainerResourcesCpuLimits string + statusContainerResourcesMemoryRequests string + statusContainerResourcesMemoryLimits string + statusResize corev1.PodResizeStatus + containerConfig containerConfig } func NewStartupPodConfig(stateStarted podcommon.StateBool, stateReady podcommon.StateBool) podConfig { @@ -149,24 +147,18 @@ func newPodConfigForState( switch stateResources { case podcommon.StateResourcesStartup: - config.statusContainerAllocatedResourcesCpu = PodAnnotationCpuStartup - 
config.statusContainerAllocatedResourcesMemory = PodAnnotationMemoryStartup config.statusContainerResourcesCpuRequests = PodAnnotationCpuStartup config.statusContainerResourcesCpuLimits = PodAnnotationCpuStartup config.statusContainerResourcesMemoryRequests = PodAnnotationMemoryStartup config.statusContainerResourcesMemoryLimits = PodAnnotationMemoryStartup case podcommon.StateResourcesPostStartup: - config.statusContainerAllocatedResourcesCpu = PodAnnotationCpuPostStartupRequests - config.statusContainerAllocatedResourcesMemory = PodAnnotationMemoryPostStartupRequests config.statusContainerResourcesCpuRequests = PodAnnotationCpuPostStartupRequests config.statusContainerResourcesCpuLimits = PodAnnotationCpuPostStartupLimits config.statusContainerResourcesMemoryRequests = PodAnnotationMemoryPostStartupRequests config.statusContainerResourcesMemoryLimits = PodAnnotationMemoryPostStartupLimits case podcommon.StateResourcesUnknown: - config.statusContainerAllocatedResourcesCpu = PodAnnotationCpuUnknown - config.statusContainerAllocatedResourcesMemory = PodAnnotationMemoryUnknown config.statusContainerResourcesCpuRequests = PodAnnotationCpuUnknown config.statusContainerResourcesCpuLimits = PodAnnotationCpuUnknown config.statusContainerResourcesMemoryRequests = PodAnnotationMemoryUnknown @@ -214,10 +206,6 @@ func pod(config podConfig) *corev1.Pod { State: config.statusContainerState, Started: &config.statusContainerStarted, Ready: config.statusContainerReady, - AllocatedResources: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: resource.MustParse(config.statusContainerAllocatedResourcesCpu), - corev1.ResourceMemory: resource.MustParse(config.statusContainerAllocatedResourcesMemory), - }, Resources: &corev1.ResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceCPU: resource.MustParse(config.statusContainerResourcesCpuRequests), diff --git a/internal/pod/podtest/podbuilder.go b/internal/pod/podtest/podbuilder.go index 
6be9cf3..a4747ac 100644 --- a/internal/pod/podtest/podbuilder.go +++ b/internal/pod/podtest/podbuilder.go @@ -29,7 +29,6 @@ type podBuilder struct { containerStatusResizeStatus *v1.PodResizeStatus nilContainerStatusStarted bool nilContainerStatusResources bool - nilStatusAllocatedResources bool } func NewPodBuilder(config podConfig) *podBuilder { @@ -66,11 +65,6 @@ func (b *podBuilder) NilContainerStatusResources() *podBuilder { return b } -func (b *podBuilder) NilStatusAllocatedResources() *podBuilder { - b.nilStatusAllocatedResources = true - return b -} - func (b *podBuilder) Build() *v1.Pod { p := pod(b.config) @@ -98,9 +92,5 @@ func (b *podBuilder) Build() *v1.Pod { p.Status.ContainerStatuses[0].Resources = nil } - if b.nilStatusAllocatedResources { - p.Status.ContainerStatuses[0].AllocatedResources = nil - } - return p } diff --git a/internal/pod/podtest/podinterceptor.go b/internal/pod/podtest/podinterceptor.go index 2610554..34874c3 100644 --- a/internal/pod/podtest/podinterceptor.go +++ b/internal/pod/podtest/podinterceptor.go @@ -26,13 +26,15 @@ import ( ) var ( - ctxUuidGetInvocs = map[string]int{} - ctxUuidPatchInvocs = map[string]int{} + ctxUuidGetInvocs = map[string]int{} + ctxUuidPatchInvocs = map[string]int{} + ctxUuidSubResourcePatchInvocs = map[string]int{} ) var ( - getMutex sync.Mutex - patchMutex sync.Mutex + getMutex sync.Mutex + patchMutex sync.Mutex + subResourcePatchMutex sync.Mutex ) // InterceptorFuncGetFail returns an interceptor get function that fails. Returns withError if supplied, otherwise an @@ -116,3 +118,40 @@ func InterceptorFuncPatchFailFirstOnly(withFirstError ...error) func(_ context.C return nil } } + +// InterceptorFuncSubResourcePatchFail returns an interceptor subresource patch function that fails. Returns withError +// if supplied, otherwise an error with an empty message. 
+func InterceptorFuncSubResourcePatchFail(withError ...error) func(_ context.Context, _ client.Client, _ string, _ client.Object, _ client.Patch, _ ...client.SubResourcePatchOption) error { + if len(withError) > 1 { + panic("only 0 or 1 errors can be supplied") + } + + return func(_ context.Context, _ client.Client, _ string, _ client.Object, _ client.Patch, _ ...client.SubResourcePatchOption) error { + if len(withError) == 0 { + return errors.New("") + } + return withError[0] + } +} + +// InterceptorFuncSubResourcePatchFailFirstOnly returns an interceptor subresource patch function that fails on the +// first invocation only. Returns withError if supplied, otherwise an error with an empty message. +func InterceptorFuncSubResourcePatchFailFirstOnly(withFirstError ...error) func(_ context.Context, _ client.Client, _ string, _ client.Object, _ client.Patch, _ ...client.SubResourcePatchOption) error { + return func(ctx context.Context, _ client.Client, _ string, _ client.Object, _ client.Patch, _ ...client.SubResourcePatchOption) error { + defer subResourcePatchMutex.Unlock() + subResourcePatchMutex.Lock() + + uuid := ctx.Value(contexttest.KeyUuid).(string) + current, got := ctxUuidSubResourcePatchInvocs[uuid] + if !got { + ctxUuidSubResourcePatchInvocs[uuid] = 1 + if len(withFirstError) == 0 { + return errors.New("") + } + return withFirstError[0] + } + + ctxUuidSubResourcePatchInvocs[uuid] = current + 1 + return nil + } +} diff --git a/internal/pod/status.go b/internal/pod/status.go index ba5f322..4335edd 100644 --- a/internal/pod/status.go +++ b/internal/pod/status.go @@ -37,7 +37,7 @@ const ( // Status performs operations relating to controller status. 
type Status interface { Update(context.Context, *v1.Pod, string, podcommon.States, podcommon.StatusScaleState) (*v1.Pod, error) - UpdateMutatePodFunc(context.Context, string, podcommon.States, podcommon.StatusScaleState) func(pod *v1.Pod) (bool, *v1.Pod, error) + PodMutationFunc(context.Context, string, podcommon.States, podcommon.StatusScaleState) func(pod *v1.Pod) (bool, *v1.Pod, error) } // status is the default implementation of Status. @@ -58,9 +58,9 @@ func (s *status) Update( states podcommon.States, scaleState podcommon.StatusScaleState, ) (*v1.Pod, error) { - mutatePodFunc := s.UpdateMutatePodFunc(ctx, status, states, scaleState) + mutatePodFunc := s.PodMutationFunc(ctx, status, states, scaleState) - newPod, err := s.kubeHelper.Patch(ctx, pod, mutatePodFunc) + newPod, err := s.kubeHelper.Patch(ctx, pod, mutatePodFunc, false, true) if err != nil { return nil, common.WrapErrorf(err, "unable to patch pod") } @@ -68,9 +68,9 @@ func (s *status) Update( return newPod, nil } -// UpdateMutatePodFunc returns a function that performs the actual work of updating controller status. This is used -// both by Update and elsewhere (package-externally) as additional mutations when patching for something else. -func (s *status) UpdateMutatePodFunc( +// PodMutationFunc returns a function that performs the actual work of updating controller status. This is used both by +// Update and elsewhere (package-externally) as additional mutations when patching for something else. 
+func (s *status) PodMutationFunc( ctx context.Context, status string, states podcommon.States, @@ -139,9 +139,8 @@ func (s *status) UpdateMutatePodFunc( return false, pod, nil } - mutatedPod := pod.DeepCopy() - mutatedPod.Annotations[podcommon.AnnotationStatus] = newStat.Json() - return true, mutatedPod, nil + pod.Annotations[podcommon.AnnotationStatus] = newStat.Json() + return true, pod, nil } } diff --git a/internal/pod/status_test.go b/internal/pod/status_test.go index b03b92d..681f536 100644 --- a/internal/pod/status_test.go +++ b/internal/pod/status_test.go @@ -36,7 +36,7 @@ import ( func TestStatusUpdateCore(t *testing.T) { t.Run("UnableToPatchPod", func(t *testing.T) { s := newStatus(newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset() }, + func() *kubefake.Clientset { return kubefake.NewClientset() }, func() interceptor.Funcs { return interceptor.Funcs{Patch: podtest.InterceptorFuncPatchFail()} }, ))) @@ -54,7 +54,7 @@ func TestStatusUpdateCore(t *testing.T) { t.Run("UnableToGetStatusAnnotationFromString", func(t *testing.T) { s := newStatus(newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( func() *kubefake.Clientset { - return kubefake.NewSimpleClientset( + return kubefake.NewClientset( podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), ) }, @@ -76,7 +76,7 @@ func TestStatusUpdateCore(t *testing.T) { t.Run("OkNoPreviousStatus", func(t *testing.T) { pod := podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build() s := newStatus(newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( - func() *kubefake.Clientset { return kubefake.NewSimpleClientset(pod) }, + func() *kubefake.Clientset { return kubefake.NewClientset(pod) }, func() interceptor.Funcs { return interceptor.Funcs{} }, ))) @@ -103,7 +103,7 @@ func TestStatusUpdateCore(t *testing.T) { 
t.Run("OkPreviousStatusSame", func(t *testing.T) { s := newStatus(newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( func() *kubefake.Clientset { - return kubefake.NewSimpleClientset( + return kubefake.NewClientset( podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), ) }, @@ -235,7 +235,7 @@ func TestStatusUpdateScaleStatus(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := newStatus(newKubeHelper(podtest.ControllerRuntimeFakeClientWithKubeFake( func() *kubefake.Clientset { - return kubefake.NewSimpleClientset( + return kubefake.NewClientset( podtest.NewPodBuilder(podtest.NewStartupPodConfig(podcommon.StateBoolFalse, podcommon.StateBoolFalse)).Build(), ) }, diff --git a/internal/pod/targetcontaineraction.go b/internal/pod/targetcontaineraction.go index 3b6cb44..4e3d528 100644 --- a/internal/pod/targetcontaineraction.go +++ b/internal/pod/targetcontaineraction.go @@ -62,9 +62,6 @@ func newTargetContainerAction( } } -// TODO(wt) might want to protect against resize flapping (startup -> post-startup -> startup ad nauseum) - disable for -// a period of time with startup resources. - // Execute performs the appropriate action for the determined target container state. 
func (a *targetContainerAction) Execute( ctx context.Context, @@ -223,7 +220,8 @@ func (a *targetContainerAction) notStartedWithPostStartupResAction( pod, config.GetTargetContainerName(), config.GetCpuConfig().Startup, config.GetCpuConfig().Startup, config.GetMemoryConfig().Startup, config.GetMemoryConfig().Startup, - a.status.UpdateMutatePodFunc(ctx, msg, states, podcommon.StatusScaleStateUpCommanded), + a.status.PodMutationFunc(ctx, msg, states, podcommon.StatusScaleStateUpCommanded), + true, ) if err != nil { return common.WrapErrorf(err, "unable to patch container resources") @@ -248,7 +246,8 @@ func (a *targetContainerAction) startedWithStartupResAction( pod, config.GetTargetContainerName(), config.GetCpuConfig().PostStartupRequests, config.GetCpuConfig().PostStartupLimits, config.GetMemoryConfig().PostStartupRequests, config.GetMemoryConfig().PostStartupLimits, - a.status.UpdateMutatePodFunc(ctx, msg, states, podcommon.StatusScaleStateDownCommanded), + a.status.PodMutationFunc(ctx, msg, states, podcommon.StatusScaleStateDownCommanded), + true, ) if err != nil { return common.WrapErrorf(err, "unable to patch container resources") @@ -285,7 +284,8 @@ func (a *targetContainerAction) notStartedWithUnknownResAction( pod, config.GetTargetContainerName(), config.GetCpuConfig().Startup, config.GetCpuConfig().Startup, config.GetMemoryConfig().Startup, config.GetMemoryConfig().Startup, - a.status.UpdateMutatePodFunc(ctx, msg, states, podcommon.StatusScaleStateUnknownCommanded), + a.status.PodMutationFunc(ctx, msg, states, podcommon.StatusScaleStateUnknownCommanded), + true, ) if err != nil { return common.WrapErrorf(err, "unable to patch container resources") @@ -311,7 +311,8 @@ func (a *targetContainerAction) startedWithUnknownResAction( pod, config.GetTargetContainerName(), config.GetCpuConfig().PostStartupRequests, config.GetCpuConfig().PostStartupLimits, config.GetMemoryConfig().PostStartupRequests, config.GetMemoryConfig().PostStartupLimits, - 
a.status.UpdateMutatePodFunc(ctx, msg, states, podcommon.StatusScaleStateUnknownCommanded), + a.status.PodMutationFunc(ctx, msg, states, podcommon.StatusScaleStateUnknownCommanded), + true, ) if err != nil { return common.WrapErrorf(err, "unable to patch container resources") @@ -399,51 +400,7 @@ func (a *targetContainerAction) processConfigEnacted( return fmt.Errorf("%s '%s'", msg, a.kubeHelper.ResizeStatus(pod)) } - // Resize is not pending, so examine AllocatedResources. - switch states.AllocatedResources { - case podcommon.StateAllocatedResourcesIncomplete: - // Target container allocated CPU and/or memory resources are missing. Log and return with the expectation that - // the missing items become available in the future. - a.logInfoAndUpdateStatus( - ctx, - logging.VDebug, - states, podcommon.StatusScaleStateNotApplicable, - pod, - "target container allocated cpu and/or memory resources currently missing", - ) - return nil - - case podcommon.StateAllocatedResourcesContainerRequestsMatch: // Want this, but here so we can panic on default below. - - case podcommon.StateAllocatedResourcesContainerRequestsMismatch: - // Target container allocated CPU and/or memory resources don't match target container's 'requests'. Log and - // return with the expectation that they match in the future. - a.logInfoAndUpdateStatus( - ctx, - logging.VDebug, - states, podcommon.StatusScaleStateNotApplicable, - pod, - "target container allocated cpu and/or memory resources currently don't match target container's 'requests'", - ) - return nil - - case podcommon.StateAllocatedResourcesUnknown: - // Target container allocated CPU and/or memory resources are unknown. Log and return with the expectation that - // they become known in the future. 
- a.logInfoAndUpdateStatus( - ctx, - logging.VDebug, - states, podcommon.StatusScaleStateNotApplicable, - pod, - "target container allocated cpu and/or memory resources currently unknown", - ) - return nil - - default: - panic(fmt.Errorf("unknown state '%s'", states.AllocatedResources)) - } - - // AllocatedResources is as expected - finally examine StatusResources. + // Resize is not pending, so examine StatusResources. switch states.StatusResources { case podcommon.StateStatusResourcesIncomplete: // Target container current CPU and/or memory resources are missing. Log and return with the expectation that diff --git a/internal/pod/targetcontaineraction_test.go b/internal/pod/targetcontaineraction_test.go index 4f2fe4e..eb6e95e 100644 --- a/internal/pod/targetcontaineraction_test.go +++ b/internal/pod/targetcontaineraction_test.go @@ -159,14 +159,13 @@ func TestTargetContainerActionExecute(t *testing.T) { configHelperMockFunc: func(m *podtest.MockKubeHelper) {}, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - StartupProbe: podcommon.StateBoolFalse, - ReadinessProbe: podcommon.StateBoolFalse, - Container: podcommon.StateContainerRunning, - Started: podcommon.StateBoolFalse, - Ready: podcommon.StateBoolFalse, - Resources: podcommon.StateResourcesStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + StartupProbe: podcommon.StateBoolFalse, + ReadinessProbe: podcommon.StateBoolFalse, + Container: podcommon.StateContainerRunning, + Started: podcommon.StateBoolFalse, + Ready: podcommon.StateBoolFalse, + Resources: podcommon.StateResourcesStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, wantPanicErrMsg: "neither startup probe or readiness probe present", }, @@ -180,14 +179,13 @@ func TestTargetContainerActionExecute(t *testing.T) { }, configContHelperMockFunc: func(m 
*podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - StartupProbe: podcommon.StateBoolTrue, - ReadinessProbe: podcommon.StateBoolFalse, - Container: podcommon.StateContainerRunning, - Started: podcommon.StateBoolFalse, - Ready: podcommon.StateBoolFalse, - Resources: podcommon.StateResourcesStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + StartupProbe: podcommon.StateBoolTrue, + ReadinessProbe: podcommon.StateBoolFalse, + Container: podcommon.StateContainerRunning, + Started: podcommon.StateBoolFalse, + Ready: podcommon.StateBoolFalse, + Resources: podcommon.StateResourcesStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, wantLogMsg: "startup resources enacted", wantStatusUpdate: true, @@ -202,14 +200,13 @@ func TestTargetContainerActionExecute(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - StartupProbe: podcommon.StateBoolFalse, - ReadinessProbe: podcommon.StateBoolTrue, - Container: podcommon.StateContainerRunning, - Started: podcommon.StateBoolFalse, - Ready: podcommon.StateBoolFalse, - Resources: podcommon.StateResourcesStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + StartupProbe: podcommon.StateBoolFalse, + ReadinessProbe: podcommon.StateBoolTrue, + Container: podcommon.StateContainerRunning, + Started: podcommon.StateBoolFalse, + Ready: podcommon.StateBoolFalse, + Resources: podcommon.StateResourcesStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, wantLogMsg: "startup resources enacted", wantStatusUpdate: true, @@ -217,7 +214,7 @@ func TestTargetContainerActionExecute(t *testing.T) { { name: "StartedWithStartupResAction", configStatusMockFunc: func(m *podtest.MockStatus, run 
func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -237,7 +234,7 @@ func TestTargetContainerActionExecute(t *testing.T) { { name: "NotStartedWithPostStartupResAction", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -264,14 +261,13 @@ func TestTargetContainerActionExecute(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - StartupProbe: podcommon.StateBoolTrue, - ReadinessProbe: podcommon.StateBoolTrue, - Container: podcommon.StateContainerRunning, - Started: podcommon.StateBoolTrue, - Ready: podcommon.StateBoolTrue, - Resources: podcommon.StateResourcesPostStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + StartupProbe: podcommon.StateBoolTrue, + ReadinessProbe: podcommon.StateBoolTrue, + Container: podcommon.StateContainerRunning, + Started: podcommon.StateBoolTrue, + Ready: podcommon.StateBoolTrue, + Resources: podcommon.StateResourcesPostStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, wantLogMsg: "post-startup resources enacted", wantStatusUpdate: true, @@ -280,7 +276,7 @@ func TestTargetContainerActionExecute(t *testing.T) { name: "NotStartedWithUnknownResAction", scaleWhenUnknownRes: true, configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -301,7 +297,7 @@ func TestTargetContainerActionExecute(t *testing.T) { name: 
"StartedWithUnknownResAction", scaleWhenUnknownRes: true, configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -507,9 +503,8 @@ func TestTargetContainerActionNotStartedWithStartupResAction(t *testing.T) { { "Error", podcommon.States{ - Resources: podcommon.StateResourcesStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMismatch, + Resources: podcommon.StateResourcesStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMismatch, }, func(m *podtest.MockStatus, run func()) { m.UpdateDefaultAndRun(run) @@ -528,8 +523,7 @@ func TestTargetContainerActionNotStartedWithStartupResAction(t *testing.T) { { "Ok", podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, func(m *podtest.MockStatus, run func()) { m.UpdateDefaultAndRun(run) @@ -586,10 +580,10 @@ func TestTargetContainerActionNotStartedWithPostStartupResAction(t *testing.T) { { name: "UnableToPatchContainerResources", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(&v1.Pod{}, errors.New("")) }, wantErrMsg: "unable to patch container resources", @@ -598,7 +592,7 @@ func TestTargetContainerActionNotStartedWithPostStartupResAction(t *testing.T) { { name: "Ok", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -660,10 +654,10 @@ func TestTargetContainerActionStartedWithStartupResAction(t *testing.T) { { name: "UnableToPatchContainerResources", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(&v1.Pod{}, errors.New("")) }, wantErrMsg: "unable to patch container resources", @@ -672,7 +666,7 @@ func TestTargetContainerActionStartedWithStartupResAction(t *testing.T) { { name: "Ok", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -735,9 +729,8 @@ func TestTargetContainerActionStartedWithPostStartupResAction(t *testing.T) { { "Error", podcommon.States{ - Resources: podcommon.StateResourcesPostStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMismatch, + Resources: podcommon.StateResourcesPostStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMismatch, }, func(m *podtest.MockStatus, run func()) { m.UpdateDefaultAndRun(run) @@ -756,8 +749,7 @@ func TestTargetContainerActionStartedWithPostStartupResAction(t *testing.T) { { "Ok", podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, func(m *podtest.MockStatus, run func()) { m.UpdateDefaultAndRun(run) @@ -809,10 +801,10 @@ func TestTargetContainerActionNotStartedWithUnknownResAction(t *testing.T) { { name: "UnableToPatchContainerResources", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(&v1.Pod{}, errors.New("")) }, wantErrMsg: "unable to patch container resources", @@ -821,7 +813,7 @@ func TestTargetContainerActionNotStartedWithUnknownResAction(t *testing.T) { { name: "Ok", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -883,10 +875,10 @@ func TestTargetContainerActionStartedWithUnknownResAction(t *testing.T) { { name: "UnableToPatchContainerResources", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + m.On("UpdateContainerResources", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(&v1.Pod{}, errors.New("")) }, wantErrMsg: "unable to patch container resources", @@ -895,7 +887,7 @@ func TestTargetContainerActionStartedWithUnknownResAction(t *testing.T) { { name: "Ok", configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateMutatePodFuncDefaultAndRun(run) + m.PodMutationFuncDefaultAndRun(run) }, configHelperMockFunc: func(m *podtest.MockKubeHelper) { m.UpdateContainerResourcesDefault() @@ -1091,64 +1083,6 @@ func TestTargetContainerActionProcessConfigEnacted(t *testing.T) { wantEventMsg: "Startup scale: unknown status", }, - { - name: string(podcommon.StateAllocatedResourcesIncomplete), - configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateDefaultAndRun(run) - }, - configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.ResizeStatusDefault() - }, - configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, - states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesIncomplete, - }, - wantStatusUpdate: true, - wantLogMsg: "target container allocated cpu and/or memory resources currently missing", - }, - { - name: string(podcommon.StateAllocatedResourcesContainerRequestsMismatch), - configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateDefaultAndRun(run) - }, - configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.ResizeStatusDefault() - }, - configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, - states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMismatch, - }, - wantStatusUpdate: true, - wantLogMsg: "target container allocated cpu and/or memory resources currently don't match target container's 'requests'", - }, - { - name: string(podcommon.StateAllocatedResourcesUnknown), - configStatusMockFunc: func(m *podtest.MockStatus, run func()) { - m.UpdateDefaultAndRun(run) - }, - configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.ResizeStatusDefault() - }, - 
configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, - states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesUnknown, - }, - wantStatusUpdate: true, - wantLogMsg: "target container allocated cpu and/or memory resources currently unknown", - }, - { - name: "UnknownAllocatedResourcesStatePanics", - configStatusMockFunc: func(m *podtest.MockStatus, run func()) {}, - configHelperMockFunc: func(m *podtest.MockKubeHelper) { - m.ResizeStatusDefault() - }, - configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, - states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResources("test"), - }, - wantPanicErrMsg: "unknown state 'test'", - wantStatusUpdate: true, - }, { name: string(podcommon.StateStatusResourcesIncomplete), configStatusMockFunc: func(m *podtest.MockStatus, run func()) { @@ -1159,8 +1093,7 @@ func TestTargetContainerActionProcessConfigEnacted(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesIncomplete, + StatusResources: podcommon.StateStatusResourcesIncomplete, }, wantStatusUpdate: true, wantLogMsg: "target container current cpu and/or memory resources currently missing", @@ -1175,8 +1108,7 @@ func TestTargetContainerActionProcessConfigEnacted(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMismatch, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMismatch, }, wantStatusUpdate: true, wantLogMsg: "target container current cpu and/or memory resources currently don't match target container's 'requests'", @@ -1191,8 +1123,7 @@ func 
TestTargetContainerActionProcessConfigEnacted(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesUnknown, + StatusResources: podcommon.StateStatusResourcesUnknown, }, wantStatusUpdate: true, wantLogMsg: "target container current cpu and/or memory resources currently unknown", @@ -1205,8 +1136,7 @@ func TestTargetContainerActionProcessConfigEnacted(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResources("test"), + StatusResources: podcommon.StateStatusResources("test"), }, wantPanicErrMsg: "unknown state 'test'", wantStatusUpdate: true, @@ -1221,9 +1151,8 @@ func TestTargetContainerActionProcessConfigEnacted(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - Resources: podcommon.StateResourcesPostStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + Resources: podcommon.StateResourcesPostStartup, + StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, }, wantStatusUpdate: true, wantLogMsg: "post-startup resources enacted", @@ -1239,9 +1168,8 @@ func TestTargetContainerActionProcessConfigEnacted(t *testing.T) { }, configContHelperMockFunc: func(m *podtest.MockContainerKubeHelper) {}, states: podcommon.States{ - Resources: podcommon.StateResourcesStartup, - AllocatedResources: podcommon.StateAllocatedResourcesContainerRequestsMatch, - StatusResources: podcommon.StateStatusResourcesContainerResourcesMatch, + Resources: podcommon.StateResourcesStartup, + StatusResources: 
podcommon.StateStatusResourcesContainerResourcesMatch, }, wantStatusUpdate: true, wantLogMsg: "startup resources enacted", diff --git a/internal/pod/targetcontainerstate.go b/internal/pod/targetcontainerstate.go index 6dd094c..7286004 100644 --- a/internal/pod/targetcontainerstate.go +++ b/internal/pod/targetcontainerstate.go @@ -81,14 +81,6 @@ func (s targetContainerState) States(ctx context.Context, pod *v1.Pod, config po s.isPostStartupConfigApplied(container, config), ) - ret.AllocatedResources, err = s.stateAllocatedResources(pod, container, config) - if err != nil { - if !s.shouldReturnError(ctx, err) { - return ret, nil - } - return ret, common.WrapErrorf(err, "unable to determine allocated resources states") - } - ret.StatusResources, err = s.stateStatusResources(pod, container, config) if err != nil { if !s.shouldReturnError(ctx, err) { @@ -182,37 +174,7 @@ func (s targetContainerState) stateResources( } } -// stateAllocatedResources returns the allocated resources state for the target container, using the supplied config. 
-func (s targetContainerState) stateAllocatedResources( - pod *v1.Pod, - container *v1.Container, - config podcommon.ScaleConfig, -) (podcommon.StateAllocatedResources, error) { - allocatedCpu, err := s.containerKubeHelper.AllocatedResources(pod, config.GetTargetContainerName(), v1.ResourceCPU) - if err != nil { - return podcommon.StateAllocatedResourcesUnknown, common.WrapErrorf(err, "unable to get allocated cpu resources") - } - - allocatedMemory, err := s.containerKubeHelper.AllocatedResources(pod, config.GetTargetContainerName(), v1.ResourceMemory) - if err != nil { - return podcommon.StateAllocatedResourcesUnknown, common.WrapErrorf(err, "unable to get allocated memory resources") - } - - if allocatedCpu.IsZero() || allocatedMemory.IsZero() { - return podcommon.StateAllocatedResourcesIncomplete, nil - } - - requestsCpu := s.containerKubeHelper.Requests(container, v1.ResourceCPU) - requestsMemory := s.containerKubeHelper.Requests(container, v1.ResourceMemory) - - if allocatedCpu.Equal(requestsCpu) && allocatedMemory.Equal(requestsMemory) { - return podcommon.StateAllocatedResourcesContainerRequestsMatch, nil - } - - return podcommon.StateAllocatedResourcesContainerRequestsMismatch, nil -} - -// stateAllocatedResources returns the status resources state for the target container, using the supplied config. +// stateStatusResources returns the status resources state for the target container, using the supplied config. 
func (s targetContainerState) stateStatusResources( pod *v1.Pod, container *v1.Container, @@ -293,11 +255,6 @@ func (s targetContainerState) shouldReturnError(ctx context.Context, err error) return false } - if errors.As(err, &ContainerStatusAllocatedResourcesNotPresentError{}) { - logging.Infof(ctx, logging.VDebug, "container status allocated resources not yet present") - return false - } - if errors.As(err, &ContainerStatusResourcesNotPresentError{}) { logging.Infof(ctx, logging.VDebug, "container status resources not yet present") return false diff --git a/internal/pod/targetcontainerstate_test.go b/internal/pod/targetcontainerstate_test.go index facf6cd..1a7292f 100644 --- a/internal/pod/targetcontainerstate_test.go +++ b/internal/pod/targetcontainerstate_test.go @@ -66,7 +66,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolUnknown, podcommon.StateBoolUnknown, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, ), wantErrMsg: "unable to determine container state", @@ -87,7 +86,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolUnknown, podcommon.StateBoolUnknown, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, ), wantErrMsg: "unable to determine started state", @@ -110,36 +108,10 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolTrue, podcommon.StateBoolUnknown, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, ), wantErrMsg: "unable to determine ready state", }, - { - name: "UnableToGetAllocatedResourcesState", - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, mock.Anything). 
- Return(resource.Quantity{}, errors.New("")) - m.GetDefault() - m.HasStartupProbeDefault() - m.HasReadinessProbeDefault() - m.StateDefault() - m.IsStartedDefault() - m.IsReadyDefault() - applyMockRequestsLimitsStartup(m) - }, - wantStates: podcommon.NewStates( - podcommon.StateBoolTrue, - podcommon.StateBoolTrue, - podcommon.StateContainerRunning, - podcommon.StateBoolTrue, - podcommon.StateBoolTrue, - podcommon.StateResourcesStartup, - podcommon.StateAllocatedResourcesUnknown, - podcommon.StateStatusResourcesUnknown, - ), - wantErrMsg: "unable to determine allocated resources states", - }, { name: "UnableToGetStatusResourcesState", configMockFunc: func(m *podtest.MockContainerKubeHelper) { @@ -152,7 +124,6 @@ func TestTargetContainerStateStates(t *testing.T) { m.IsStartedDefault() m.IsReadyDefault() applyMockRequestsLimitsStartup(m) - m.AllocatedResourcesDefault() }, wantStates: podcommon.NewStates( podcommon.StateBoolTrue, @@ -161,7 +132,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolTrue, podcommon.StateBoolTrue, podcommon.StateResourcesStartup, - podcommon.StateAllocatedResourcesContainerRequestsMismatch, podcommon.StateStatusResourcesUnknown, ), wantErrMsg: "unable to determine status resources states", @@ -181,7 +151,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolUnknown, podcommon.StateBoolUnknown, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, ), }, @@ -201,7 +170,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolUnknown, podcommon.StateBoolUnknown, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, ), }, @@ -222,31 +190,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolTrue, podcommon.StateBoolUnknown, podcommon.StateResourcesUnknown, - podcommon.StateAllocatedResourcesUnknown, - 
podcommon.StateStatusResourcesUnknown, - ), - }, - { - name: "StateAllocatedResourcesNotShouldReturnError", - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, mock.Anything). - Return(resource.Quantity{}, NewContainerStatusNotPresentError()) - m.GetDefault() - m.HasStartupProbeDefault() - m.HasReadinessProbeDefault() - m.StateDefault() - m.IsStartedDefault() - m.IsReadyDefault() - applyMockRequestsLimitsStartup(m) - }, - wantStates: podcommon.NewStates( - podcommon.StateBoolTrue, - podcommon.StateBoolTrue, - podcommon.StateContainerRunning, - podcommon.StateBoolTrue, - podcommon.StateBoolTrue, - podcommon.StateResourcesStartup, - podcommon.StateAllocatedResourcesUnknown, podcommon.StateStatusResourcesUnknown, ), }, @@ -262,7 +205,6 @@ func TestTargetContainerStateStates(t *testing.T) { m.IsStartedDefault() m.IsReadyDefault() applyMockRequestsLimitsStartup(m) - m.AllocatedResourcesDefault() }, wantStates: podcommon.NewStates( podcommon.StateBoolTrue, @@ -271,7 +213,6 @@ func TestTargetContainerStateStates(t *testing.T) { podcommon.StateBoolTrue, podcommon.StateBoolTrue, podcommon.StateResourcesStartup, - podcommon.StateAllocatedResourcesContainerRequestsMismatch, podcommon.StateStatusResourcesUnknown, ), }, @@ -285,7 +226,6 @@ func TestTargetContainerStateStates(t *testing.T) { m.IsStartedDefault() m.IsReadyDefault() applyMockRequestsLimitsStartup(m) - m.AllocatedResourcesDefault() m.CurrentRequestsDefault() m.CurrentLimitsDefault() }, @@ -301,7 +241,6 @@ func TestTargetContainerStateStates(t *testing.T) { m.IsStartedDefault() m.IsReadyDefault() applyMockRequestsLimitsPostStartup(m) - m.AllocatedResourcesDefault() m.CurrentRequestsDefault() m.CurrentLimitsDefault() }, @@ -317,7 +256,6 @@ func TestTargetContainerStateStates(t *testing.T) { m.IsStartedDefault() m.IsReadyDefault() applyMockRequestsLimitsUnknown(m) - m.AllocatedResourcesDefault() m.CurrentRequestsDefault() m.CurrentLimitsDefault() }, 
@@ -570,85 +508,6 @@ func TestTargetContainerStateStateReady(t *testing.T) { } } -func TestTargetContainerStateStateAllocatedResources(t *testing.T) { - tests := []struct { - name string - configMockFunc func(*podtest.MockContainerKubeHelper) - want podcommon.StateAllocatedResources - wantErrMsg string - }{ - { - name: "UnableToGetAllocatedCpuResources", - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceCPU). - Return(resource.Quantity{}, errors.New("")) - }, - want: podcommon.StateAllocatedResourcesUnknown, - wantErrMsg: "unable to get allocated cpu resources", - }, - { - name: "UnableToGetAllocatedMemoryResources", - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceMemory). - Return(resource.Quantity{}, errors.New("")) - m.AllocatedResourcesDefault() - }, - want: podcommon.StateAllocatedResourcesUnknown, - wantErrMsg: "unable to get allocated memory resources", - }, - { - name: string(podcommon.StateAllocatedResourcesIncomplete), - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceCPU). - Return(resource.Quantity{}, nil) - m.AllocatedResourcesDefault() - }, - want: podcommon.StateAllocatedResourcesIncomplete, - }, - { - name: string(podcommon.StateAllocatedResourcesContainerRequestsMatch), - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceCPU). - Return(podtest.PodAnnotationCpuStartupQuantity, nil) - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceMemory). - Return(podtest.PodAnnotationMemoryStartupQuantity, nil) - m.On("Requests", mock.Anything, v1.ResourceCPU). - Return(podtest.PodAnnotationCpuStartupQuantity, nil) - m.On("Requests", mock.Anything, v1.ResourceMemory). 
- Return(podtest.PodAnnotationMemoryStartupQuantity, nil) - }, - want: podcommon.StateAllocatedResourcesContainerRequestsMatch, - }, - { - name: string(podcommon.StateAllocatedResourcesContainerRequestsMismatch), - configMockFunc: func(m *podtest.MockContainerKubeHelper) { - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceCPU). - Return(podtest.PodAnnotationCpuStartupQuantity, nil) - m.On("AllocatedResources", mock.Anything, mock.Anything, v1.ResourceMemory). - Return(podtest.PodAnnotationMemoryStartupQuantity, nil) - m.On("Requests", mock.Anything, v1.ResourceCPU). - Return(podtest.PodAnnotationCpuPostStartupRequestsQuantity, nil) - m.On("Requests", mock.Anything, v1.ResourceMemory). - Return(podtest.PodAnnotationMemoryStartupQuantity, nil) - }, - want: podcommon.StateAllocatedResourcesContainerRequestsMismatch, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := newTargetContainerState(podtest.NewMockContainerKubeHelper(tt.configMockFunc)) - - got, err := s.stateAllocatedResources(&v1.Pod{}, &v1.Container{}, NewScaleConfig(nil)) - if tt.wantErrMsg != "" { - assert.Contains(t, err.Error(), tt.wantErrMsg) - } else { - assert.Nil(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - func TestTargetContainerStateStateStatusResources(t *testing.T) { tests := []struct { name string @@ -774,7 +633,6 @@ func TestTargetContainerStateShouldReturnError(t *testing.T) { want bool }{ {"ContainerStatusResourcesNotPresentErrorFalse", NewContainerStatusResourcesNotPresentError(), false}, - {"ContainerStatusAllocatedResourcesNotPresentErrorFalse", NewContainerStatusAllocatedResourcesNotPresentError(), false}, {"NewContainerStatusResourcesNotPresentErrorFalse", NewContainerStatusResourcesNotPresentError(), false}, {"True", errors.New(""), true}, } diff --git a/scripts/sandbox/config/vars.sh b/scripts/sandbox/config/vars.sh index 573818c..1834adc 100644 --- a/scripts/sandbox/config/vars.sh +++ b/scripts/sandbox/config/vars.sh @@ 
-19,7 +19,7 @@ kind_cluster_name="csa-sandbox-cluster" # shellcheck disable=SC2034 -kind_kube_version="v1.31.3" +kind_kube_version="v1.32.0" # shellcheck disable=SC2034 kind_node_docker_tag="kindest/node:$kind_kube_version" diff --git a/scripts/sandbox/csa-install.sh b/scripts/sandbox/csa-install.sh index 7405fb7..16ead3f 100644 --- a/scripts/sandbox/csa-install.sh +++ b/scripts/sandbox/csa-install.sh @@ -14,13 +14,61 @@ # See the License for the specific language governing permissions and # limitations under the License. +extra_ca_cert_path="" + +while [ $# -gt 0 ]; do + case "$1" in + --extra-ca-cert-path=*) + extra_ca_cert_path="${1#*=}" + ;; + *) + echo "Unrecognized argument: $1. Supported: --extra-ca-cert-path (optional)." + exit 1 + esac + shift +done + source config/vars.sh source csa-uninstall.sh # shellcheck disable=SC2154 if [ -z "$(docker images --filter "reference=$kind_node_docker_tag" --format '{{.Repository}}:{{.Tag}}')" ]; then - # shellcheck disable=SC2154 - kind build node-image --type release "$kind_kube_version" --image "$kind_node_docker_tag" + if [ -n "$extra_ca_cert_path" ]; then + if [ ! -e "$extra_ca_cert_path" ]; then + echo "File supplied via --extra-ca-cert-path doesn't exist." + exit 1 + fi + + default_kind_base_image=$(kind build node-image --help | sed -n 's/.*--base-image.*default ".*\(kindest[^"]*\)".*/\1/p') + if [ -z "$default_kind_base_image" ]; then + echo "Unable to locate default base image." + exit 1 + fi + + # Gets overwritten if original base image tag is used, so alter. 
+ built_kind_base_image_tag="$default_kind_base_image-extracacert" + + temp_dir=$(mktemp -d) + copied_extra_ca_cert_filename="extra-ca-cert.crt" + + cp "$extra_ca_cert_path" "$temp_dir/$copied_extra_ca_cert_filename" + docker build \ + -f extracacert/Dockerfile \ + -t "$built_kind_base_image_tag" \ + --build-arg "BASE_IMAGE=$default_kind_base_image" \ + --build-arg "EXTRA_CA_CERT_FILENAME=$copied_extra_ca_cert_filename" \ + "$temp_dir" + rm -rf "$temp_dir" + + kind build node-image \ + --type release "$kind_kube_version" \ + --base-image "$built_kind_base_image_tag" \ + --image "$kind_node_docker_tag" + else + kind build node-image \ + --type release "$kind_kube_version" \ + --image "$kind_node_docker_tag" + fi fi # shellcheck disable=SC2154 @@ -39,7 +87,6 @@ kubectl apply -k config/metricsserver --kubeconfig "$kind_kubeconfig" # shellcheck disable=SC2154 docker pull "$echo_server_docker_image_tag" -# shellcheck disable=SC2154 kind load docker-image "$echo_server_docker_image_tag" --name "$kind_cluster_name" # shellcheck disable=SC2154 diff --git a/scripts/sandbox/extracacert/Dockerfile b/scripts/sandbox/extracacert/Dockerfile new file mode 100644 index 0000000..5772d34 --- /dev/null +++ b/scripts/sandbox/extracacert/Dockerfile @@ -0,0 +1,6 @@ +ARG BASE_IMAGE=kindest/base:v20241108-5c6d2daf + +FROM ${BASE_IMAGE} +ARG EXTRA_CA_CERT_FILENAME +COPY ./${EXTRA_CA_CERT_FILENAME} /usr/local/share/ca-certificates/extra-ca-cert.crt +RUN update-ca-certificates diff --git a/test/integration/consts.go b/test/integration/consts.go index 1494140..4ed17d0 100644 --- a/test/integration/consts.go +++ b/test/integration/consts.go @@ -32,7 +32,7 @@ const ( ) var kubeVersionToFullVersion = map[string]string{ - "1.31": "v1.31.3", + "1.32": "v1.32.0", // Older versions are not supported by 'kind build node-image' as the server tgzs don't include the 'version' file // and fail. 
} diff --git a/test/integration/csa.go b/test/integration/csa.go index cf861a0..f135d31 100644 --- a/test/integration/csa.go +++ b/test/integration/csa.go @@ -155,7 +155,7 @@ func csaWaitStatus( logMessage(t, fmt.Sprintf("current csa status for pod '%s/%s': %s", podNamespace, podName, lastStatusAnnJson)) if strings.Contains(statusAnn.Status, waitMsgContains) { - // TODO(wt) 'In-place Update of Pod Resources' implementation bug (Kube 1.29) + // TODO(wt) 'In-place Update of Pod Resources' implementation bug (still in Kube 1.32). // See large comment at top of integration_test.go - need to re-get pod in case resize is restarted. // Remove once fixed. if getAgain { diff --git a/test/integration/extracacert/Dockerfile b/test/integration/extracacert/Dockerfile new file mode 100644 index 0000000..5772d34 --- /dev/null +++ b/test/integration/extracacert/Dockerfile @@ -0,0 +1,6 @@ +ARG BASE_IMAGE=kindest/base:v20241108-5c6d2daf + +FROM ${BASE_IMAGE} +ARG EXTRA_CA_CERT_FILENAME +COPY ./${EXTRA_CA_CERT_FILENAME} /usr/local/share/ca-certificates/extra-ca-cert.crt +RUN update-ca-certificates diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2212fef..7f8b5a9 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -31,7 +31,7 @@ import ( /* -// TODO(wt) 'In-place Update of Pod Resources' implementation bug (Kube 1.29) +TODO(wt) 'In-place Update of Pod Resources' implementation bug (still in Kube 1.32). Note: there currently appears to be a bug in the 'In-place Update of Pod Resources' implementation whereby successful resizes are restarted - this is specifically mitigated against within csaWaitStatus(). 
This sometimes (depending on the timing of retrieving pods via kubectl) manifested in a CSA status that (correctly) stated that the resize had occurred, @@ -53,7 +53,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "poststartup", - "allocatedResources": "unknown", "statusResources": "unknown" }, "time": 1698695785143, @@ -72,7 +71,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "poststartup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmatch" }, "time": 1698695786101, @@ -91,7 +89,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "startup", - "allocatedResources": "containerrequestsmismatch", "statusResources": "containerresourcesmismatch" }, "time": 1698695786101, @@ -110,7 +107,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "startup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmismatch" }, "time": 1698695788169, @@ -129,7 +125,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "startup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmatch" }, "time": 1698695789015, @@ -148,7 +143,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "startup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmismatch" <-- HERE }, "time": 1698695789058, @@ -167,7 +161,6 @@ Example logs of such an event (restart marked with '<-- HERE'): "started": "false", "ready": "false", "resources": "startup", - "allocatedResources": "containerrequestsmatch", "statusResources": "containerresourcesmatch" }, "time": 
1698695789982, @@ -545,7 +538,6 @@ func TestValidationFailure(t *testing.T) { require.Equal(t, podcommon.StateBoolUnknown, statusAnn.States.Started) require.Equal(t, podcommon.StateBoolUnknown, statusAnn.States.Ready) require.Equal(t, podcommon.StateResourcesUnknown, statusAnn.States.Resources) - require.Equal(t, podcommon.StateAllocatedResourcesUnknown, statusAnn.States.AllocatedResources) require.Equal(t, podcommon.StateStatusResourcesUnknown, statusAnn.States.StatusResources) require.Empty(t, statusAnn.Scale.LastCommanded) @@ -677,12 +669,10 @@ func assertStartupEnacted( } for _, s := range pod.Status.ContainerStatuses { - expectCpuA, expectMemoryA := annotations.cpuPostStartupRequests, annotations.memoryPostStartupRequests expectCpuR, expectCpuL := annotations.cpuPostStartupRequests, annotations.cpuPostStartupLimits expectMemoryR, expectMemoryL := annotations.memoryPostStartupRequests, annotations.memoryPostStartupLimits if s.Name == echoServerName { - expectCpuA, expectMemoryA = annotations.cpuStartup, annotations.memoryStartup expectCpuR, expectCpuL = annotations.cpuStartup, annotations.cpuStartup expectMemoryR, expectMemoryL = annotations.memoryStartup, annotations.memoryStartup @@ -696,10 +686,6 @@ func assertStartupEnacted( } require.NotNil(t, s.State.Running) - cpuA := s.AllocatedResources[v1.ResourceCPU] - require.Equal(t, expectCpuA, cpuA.String()) - memoryA := s.AllocatedResources[v1.ResourceMemory] - require.Equal(t, expectMemoryA, memoryA.String()) cpuR := s.Resources.Requests[v1.ResourceCPU] require.Equal(t, expectCpuR, cpuR.String()) cpuL := s.Resources.Limits[v1.ResourceCPU] @@ -723,7 +709,6 @@ func assertStartupEnacted( } require.Equal(t, podcommon.StateBoolFalse, statusAnn.States.Ready) require.Equal(t, podcommon.StateResourcesStartup, statusAnn.States.Resources) - require.Equal(t, podcommon.StateAllocatedResourcesContainerRequestsMatch, statusAnn.States.AllocatedResources) require.Equal(t, podcommon.StateStatusResourcesContainerResourcesMatch, 
statusAnn.States.StatusResources) if expectStatusCommandedEnactedEmpty { @@ -770,7 +755,6 @@ func assertPostStartupEnacted( } for _, s := range pod.Status.ContainerStatuses { - expectCpuA, expectMemoryA := annotations.cpuPostStartupRequests, annotations.memoryPostStartupRequests expectCpuR, expectCpuL := annotations.cpuPostStartupRequests, annotations.cpuPostStartupLimits expectMemoryR, expectMemoryL := annotations.memoryPostStartupRequests, annotations.memoryPostStartupLimits @@ -781,10 +765,6 @@ func assertPostStartupEnacted( } require.NotNil(t, s.State.Running) - cpuA := s.AllocatedResources[v1.ResourceCPU] - require.Equal(t, expectCpuA, cpuA.String()) - memoryA := s.AllocatedResources[v1.ResourceMemory] - require.Equal(t, expectMemoryA, memoryA.String()) cpuR := s.Resources.Requests[v1.ResourceCPU] require.Equal(t, expectCpuR, cpuR.String()) cpuL := s.Resources.Limits[v1.ResourceCPU] @@ -804,7 +784,6 @@ func assertPostStartupEnacted( require.Equal(t, podcommon.StateBoolTrue, statusAnn.States.Started) require.Equal(t, podcommon.StateBoolTrue, statusAnn.States.Ready) require.Equal(t, podcommon.StateResourcesPostStartup, statusAnn.States.Resources) - require.Equal(t, podcommon.StateAllocatedResourcesContainerRequestsMatch, statusAnn.States.AllocatedResources) require.Equal(t, podcommon.StateStatusResourcesContainerResourcesMatch, statusAnn.States.StatusResources) require.NotEmpty(t, statusAnn.Scale.LastCommanded) diff --git a/test/integration/kind.go b/test/integration/kind.go index 84481e0..c3cc1e9 100644 --- a/test/integration/kind.go +++ b/test/integration/kind.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "os/exec" + "regexp" "strings" "testing" @@ -66,12 +67,12 @@ func kindSetupCluster(t *testing.T) { os.Exit(1) } - dockerTag := fmt.Sprintf("kindest/node:%s", kubeFullVersion) + dockerTag := "kindest/node:" + kubeFullVersion output, _ = cmdRun( t, exec.Command("docker", "images", - "--filter", fmt.Sprintf("reference=%s", dockerTag), + "--filter", 
"reference="+dockerTag, "--format", "{{.Repository}}:{{.Tag}}", ), "getting existing docker images...", @@ -80,16 +81,82 @@ func kindSetupCluster(t *testing.T) { ) if output == "" { - _, _ = cmdRun( - t, - exec.Command("kind", "build", "node-image", - "--type", "release", kubeFullVersion, - "--image", dockerTag, - ), - "building kind node image...", - "unable to build kind node image", - true, - ) + if suppliedConfig.extraCaCertPath != "" { + output, _ = cmdRun( + t, + exec.Command("kind", "build", "node-image", "--help"), + "getting kind default node image...", + "unable to get kind default node image", + true, + ) + + defaultKindBaseImage := regexp.MustCompile(`kindest/base:v[0-9]+-[a-f0-9]+`).FindString(output) + if defaultKindBaseImage == "" { + logMessage(t, "unable to locate default base image") + os.Exit(1) + } + + builtKindBaseImageTag := defaultKindBaseImage + "-extracacert" + + tempDir, err := os.MkdirTemp("", "*") + if err != nil { + logMessage(t, common.WrapErrorf(err, "unable to create temporary directory")) + os.Exit(1) + } + defer func(path string) { + _ = os.RemoveAll(path) + }(tempDir) + + copiedExtraCaCertFilename := "extra-ca-cert.crt" + + cert, err := os.ReadFile(suppliedConfig.extraCaCertPath) + if err != nil { + logMessage(t, common.WrapErrorf(err, "unable to read extra CA certificate file")) + os.Exit(1) + } + + if err := os.WriteFile(tempDir+pathSeparator+copiedExtraCaCertFilename, cert, 0644); err != nil { + logMessage(t, common.WrapErrorf(err, "unable to write CA certificate file in temporary directory")) + os.Exit(1) + } + + _, _ = cmdRun( + t, + exec.Command("docker", "build", + "-f", "extracacert/Dockerfile", + "-t", builtKindBaseImageTag, + "--build-arg", "BASE_IMAGE="+defaultKindBaseImage, + "--build-arg", "EXTRA_CA_CERT_FILENAME="+copiedExtraCaCertFilename, + tempDir, + ), + "building kind base image...", + "unable to build kind base image", + true, + ) + + _, _ = cmdRun( + t, + exec.Command("kind", "build", "node-image", + "--type", 
"release", kubeFullVersion, + "--base-image", builtKindBaseImageTag, + "--image", dockerTag, + ), + "building kind node image...", + "unable to build kind node image", + true, + ) + } else { + _, _ = cmdRun( + t, + exec.Command("kind", "build", "node-image", + "--type", "release", kubeFullVersion, + "--image", dockerTag, + ), + "building kind node image...", + "unable to build kind node image", + true, + ) + } } _, _ = cmdRun( diff --git a/test/integration/suppliedconfig.go b/test/integration/suppliedconfig.go index 4ce3985..18b39fa 100644 --- a/test/integration/suppliedconfig.go +++ b/test/integration/suppliedconfig.go @@ -25,6 +25,7 @@ import ( type suppliedConfigStruct struct { kubeVersion string maxParallelism string + extraCaCertPath string reuseCluster bool installMetricsServer bool keepCsa bool @@ -35,6 +36,7 @@ type suppliedConfigStruct struct { var suppliedConfig = suppliedConfigStruct{ kubeVersion: "", maxParallelism: "4", + extraCaCertPath: "", reuseCluster: false, installMetricsServer: false, keepCsa: false, @@ -43,16 +45,18 @@ var suppliedConfig = suppliedConfigStruct{ } func suppliedConfigInit() { - suppliedConfigSetString("KUBE_VERSION", &suppliedConfig.kubeVersion) - suppliedConfigSetString("MAX_PARALLELISM", &suppliedConfig.maxParallelism) - suppliedConfigSetBool("REUSE_CLUSTER", &suppliedConfig.reuseCluster) - suppliedConfigSetBool("INSTALL_METRICS_SERVER", &suppliedConfig.installMetricsServer) - suppliedConfigSetBool("KEEP_CSA", &suppliedConfig.keepCsa) - suppliedConfigSetBool("KEEP_CLUSTER", &suppliedConfig.keepCluster) - suppliedConfigSetBool("DELETE_NS_AFTER_TEST", &suppliedConfig.deleteNsPostTest) + suppliedConfigSetString("KUBE_VERSION", &suppliedConfig.kubeVersion, true) + suppliedConfigSetString("MAX_PARALLELISM", &suppliedConfig.maxParallelism, true) + suppliedConfigSetString("EXTRA_CA_CERT_PATH", &suppliedConfig.extraCaCertPath, false) + suppliedConfigSetBool("REUSE_CLUSTER", &suppliedConfig.reuseCluster, true) + 
suppliedConfigSetBool("INSTALL_METRICS_SERVER", &suppliedConfig.installMetricsServer, true)
+	suppliedConfigSetBool("KEEP_CSA", &suppliedConfig.keepCsa, true)
+	suppliedConfigSetBool("KEEP_CLUSTER", &suppliedConfig.keepCluster, true)
+	suppliedConfigSetBool("DELETE_NS_AFTER_TEST", &suppliedConfig.deleteNsPostTest, true)
 
-	logMessage(nil, fmt.Sprintf("(config) KUBE_VERSION: %s", suppliedConfig.kubeVersion))
-	logMessage(nil, fmt.Sprintf("(config) MAX_PARALLELISM: %s", suppliedConfig.maxParallelism))
+	logMessage(nil, fmt.Sprintf("(config) KUBE_VERSION: %s", suppliedConfig.kubeVersion))
+	logMessage(nil, fmt.Sprintf("(config) MAX_PARALLELISM: %s", suppliedConfig.maxParallelism))
+	logMessage(nil, fmt.Sprintf("(config) EXTRA_CA_CERT_PATH: %s", suppliedConfig.extraCaCertPath))
 	logMessage(nil, fmt.Sprintf("(config) REUSE_CLUSTER: %t", suppliedConfig.reuseCluster))
 	logMessage(nil, fmt.Sprintf("(config) INSTALL_METRICS_SERVER: %t", suppliedConfig.installMetricsServer))
 	logMessage(nil, fmt.Sprintf("(config) KEEP_CSA: %t", suppliedConfig.keepCsa))
@@ -60,11 +64,11 @@ func suppliedConfigInit() {
 	logMessage(nil, fmt.Sprintf("(config) DELETE_NS_AFTER_TEST: %t", suppliedConfig.deleteNsPostTest))
 }
 
-func suppliedConfigSetString(env string, config *string) {
+func suppliedConfigSetString(env string, config *string, required bool) {
 	envVal := os.Getenv(env)
+	haveEnvOrDefault := envVal != "" || (config != nil && *config != "")
 
-	if envVal == "" && (config == nil || *config == "") {
-		// Require env unless defaulted via supplied.
+ if !haveEnvOrDefault && required { logMessage(nil, fmt.Sprintf("(config) '%s' value is required", env)) os.Exit(1) } @@ -74,11 +78,11 @@ func suppliedConfigSetString(env string, config *string) { } } -func suppliedConfigSetBool(env string, config *bool) { +func suppliedConfigSetBool(env string, config *bool, required bool) { envVal := os.Getenv(env) + haveEnvOrDefault := envVal != "" || config != nil - if envVal == "" && config == nil { - // Require env unless defaulted via supplied. + if !haveEnvOrDefault && required { logMessage(nil, fmt.Sprintf("(config) '%s' value is required", env)) os.Exit(1) }