diff --git a/CHANGELOG.md b/CHANGELOG.md
index 11c399192637b..f73e15d53cf11 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,63 @@
# Changelog
+## 16.4.5 (10/22/2024)
+
+### Security Fixes
+
+#### [High] Privilege persistence in Okta SCIM-only integration
+
+When the Okta SCIM-only integration is enabled, in certain cases Teleport could
+calculate the effective set of permissions based on an SSO user's stale traits.
+This could allow a user who was unassigned from an Okta group to log into a
+Teleport cluster once with the role granted by that group still present in
+their effective role set.
+
+Note: This issue only affects Teleport clusters that have installed a SCIM-only
+Okta integration as described in this guide. If you have an Okta integration
+with user sync enabled, or you only use the Okta SSO auth connector to log into
+your Teleport cluster without SCIM integration configured, you're unaffected.
+To verify your configuration:
+
+- Run the `tctl get plugins/okta --format=json | jq ".[].spec.Settings.okta.sync_settings.sync_users"`
+  command to check whether your Okta integration has user sync enabled. If it
+  outputs `null` or `false`, you may be affected and should upgrade (see the
+  example after this list).
+- Check the SCIM provisioning settings for the Okta application you created or
+  updated while following the SCIM-only setup guide. If SCIM provisioning is
+  enabled, you may be affected and should upgrade.
+
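+For illustration, the first check on an affected SCIM-only cluster might look
+like this (hypothetical session; `null` means user sync is not enabled):
+
+```
+$ tctl get plugins/okta --format=json | jq ".[].spec.Settings.okta.sync_settings.sync_users"
+null
+```
+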
+We strongly recommend that customers who use the Okta SCIM integration upgrade
+their auth servers to version 16.4.5 or later. Teleport services other than
+auth (proxy, SSH, Kubernetes, desktop, application, database, and discovery)
+are not impacted and do not need to be updated.
+
+### Other improvements and fixes
+
+* Added a new `teleport_roles_total` metric that exposes the number of roles that exist in a cluster. [#47812](https://github.com/gravitational/teleport/pull/47812)
+* Teleport's Windows Desktop Service now filters out domain-joined Linux hosts during LDAP discovery. [#47773](https://github.com/gravitational/teleport/pull/47773)
+* The `join_token.create` audit event has been enriched with additional metadata. [#47765](https://github.com/gravitational/teleport/pull/47765)
+* Propagated resources configured in `teleport-kube-agent` chart values to post-install and post-delete hooks. [#47743](https://github.com/gravitational/teleport/pull/47743)
+* Added support for the Datadog Incident Management plugin Helm chart. [#47727](https://github.com/gravitational/teleport/pull/47727)
+* Automatic device enrollment may be locally disabled using the `TELEPORT_DEVICE_AUTO_ENROLL_DISABLED=1` environment variable (see the example after this list). [#47720](https://github.com/gravitational/teleport/pull/47720)
+* Fixed the Machine ID and GitHub Actions wizard. [#47708](https://github.com/gravitational/teleport/pull/47708)
+* Added a migration to update the old `import_all_objects` database object import rule to the new preset. [#47707](https://github.com/gravitational/teleport/pull/47707)
+* Alter ServiceAccounts in the teleport-cluster Helm chart to automatically disable mounting of service account tokens on newer Kubernetes distributions, helping satisfy security linters. [#47703](https://github.com/gravitational/teleport/pull/47703)
+* Avoided `tsh` auto-enroll escalation on machines without a TPM. [#47695](https://github.com/gravitational/teleport/pull/47695)
+* Fixed a bug that prevented users from canceling `tsh scan keys` executions. [#47658](https://github.com/gravitational/teleport/pull/47658)
+* Postgres database session start events now include the Postgres backend PID for the session. [#47643](https://github.com/gravitational/teleport/pull/47643)
+* Reworked the `teleport-event-handler` integration to significantly improve performance, especially when running with larger `--concurrency` values. [#47633](https://github.com/gravitational/teleport/pull/47633)
+* Fixed a bug where Let's Encrypt certificate renewal failed in AMI and HA deployments due to insufficient disk space caused by syncing audit logs. [#47622](https://github.com/gravitational/teleport/pull/47622)
+* Added support for a custom SQS consumer lock name and for disabling a consumer. [#47614](https://github.com/gravitational/teleport/pull/47614)
+* Fixed an issue that prevented RDS Aurora discovery configuration in the AWS OIDC enrollment wizard when any cluster existed without member instances. [#47605](https://github.com/gravitational/teleport/pull/47605)
+* Extended the Datadog plugin to support automatic approvals. [#47602](https://github.com/gravitational/teleport/pull/47602)
+* Allowed using a custom database for Firestore backends. [#47583](https://github.com/gravitational/teleport/pull/47583)
+* Included the host name instead of the host UUID in error messages when SSH connections are prevented due to an invalid login. [#47578](https://github.com/gravitational/teleport/pull/47578)
+* Fixed the example Terraform code to support the new, larger Teleport Enterprise licenses and updated the web address output to use the FQDN when ACM is disabled. [#47512](https://github.com/gravitational/teleport/pull/47512)
+* Added new `tctl` subcommands to manage bot instances. [#47225](https://github.com/gravitational/teleport/pull/47225)
+
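+For example, automatic device enrollment can be locally disabled for a single
+`tsh` invocation (hypothetical session; `teleport.example.com` is a placeholder
+proxy address):
+
+```
+$ TELEPORT_DEVICE_AUTO_ENROLL_DISABLED=1 tsh login --proxy=teleport.example.com
+```
+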
+Enterprise:
+* Device auto-enroll failures are now recorded in the audit log.
+* Fixed a possible panic when processing Okta assignments.
+
## 16.4.3 (10/16/2024)
* Extended Teleport Discovery Service to support resource discovery across all projects accessible by the service account. [#47568](https://github.com/gravitational/teleport/pull/47568)
diff --git a/Makefile b/Makefile
index ea8defde0e24a..2ecb800220c59 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@
# Stable releases: "1.0.0"
# Pre-releases: "1.0.0-alpha.1", "1.0.0-beta.2", "1.0.0-rc.3"
# Master/dev branch: "1.0.0-dev"
-VERSION=16.4.3
+VERSION=16.4.5
DOCKER_IMAGE ?= teleport
diff --git a/api/version.go b/api/version.go
index 472d98380c0c9..406229d62dec6 100644
--- a/api/version.go
+++ b/api/version.go
@@ -3,6 +3,6 @@ package api
import "github.com/coreos/go-semver/semver"
-const Version = "16.4.3"
+const Version = "16.4.5"
var SemVersion = semver.New(Version)
diff --git a/build.assets/macos/tsh/tsh.app/Contents/Info.plist b/build.assets/macos/tsh/tsh.app/Contents/Info.plist
index af98db0274660..86f7207e54629 100644
--- a/build.assets/macos/tsh/tsh.app/Contents/Info.plist
+++ b/build.assets/macos/tsh/tsh.app/Contents/Info.plist
@@ -19,13 +19,13 @@
CFBundlePackageType
APPL
CFBundleShortVersionString
- 16.4.3
+ 16.4.5
CFBundleSupportedPlatforms
MacOSX
CFBundleVersion
- 16.4.3
+ 16.4.5
DTCompiler
com.apple.compilers.llvm.clang.1_0
DTPlatformBuild
diff --git a/build.assets/macos/tshdev/tsh.app/Contents/Info.plist b/build.assets/macos/tshdev/tsh.app/Contents/Info.plist
index 39c295153fd44..8bacfe6df9f58 100644
--- a/build.assets/macos/tshdev/tsh.app/Contents/Info.plist
+++ b/build.assets/macos/tshdev/tsh.app/Contents/Info.plist
@@ -17,13 +17,13 @@
CFBundlePackageType
APPL
CFBundleShortVersionString
- 16.4.3
+ 16.4.5
CFBundleSupportedPlatforms
MacOSX
CFBundleVersion
- 16.4.3
+ 16.4.5
DTCompiler
com.apple.compilers.llvm.clang.1_0
DTPlatformBuild
diff --git a/docs/img/azuread/azuread-8c-usernameclaim.png b/docs/img/azuread/azuread-8c-usernameclaim.png
index c4522140a05e5..884df14c9eca1 100644
Binary files a/docs/img/azuread/azuread-8c-usernameclaim.png and b/docs/img/azuread/azuread-8c-usernameclaim.png differ
diff --git a/docs/pages/admin-guides/access-controls/sso/azuread.mdx b/docs/pages/admin-guides/access-controls/sso/azuread.mdx
index f6e059115036d..52dd8cc7701c5 100644
--- a/docs/pages/admin-guides/access-controls/sso/azuread.mdx
+++ b/docs/pages/admin-guides/access-controls/sso/azuread.mdx
@@ -92,14 +92,18 @@ Before you get started, you’ll need:
![Put in Security group claim](../../../../img/azuread/azuread-8b-groupclaim.png)
-1. Add a claim that transforms the format of the Azure AD username to lower case, in order to pass it to
- Teleport. Set the Source to "Transformation". In the new panel:
+1. (Optional) Add a claim that transforms the Azure AD username to lowercase so that it can be used inside
+   Teleport roles as the `{{external.username}}` property.
+
+ Set the Source to "Transformation". In the new panel:
- Set the Transformation value to "Extract()"
- Set the Attribute name to `user.userprincipalname`.
- - Set the Value to `ToLowercase()`.
+ - Set the Value to `@`.
+
+ - Click "Add Transformation" and set the Transformation to `ToLowercase()`.
![Add a transformed username](../../../../img/azuread/azuread-8c-usernameclaim.png)
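+
+   With this claim in place, the lower-cased username can be referenced in a
+   Teleport role, for example as an allowed login (illustrative snippet):
+
+   ```yaml
+   kind: role
+   version: v7
+   metadata:
+     name: azure-users
+   spec:
+     allow:
+       logins: ["{{external.username}}"]
+   ```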
diff --git a/docs/pages/admin-guides/teleport-policy/integrations/aws-sync.mdx b/docs/pages/admin-guides/teleport-policy/integrations/aws-sync.mdx
index cb35dc9c2c067..2aae7cd963fb5 100644
--- a/docs/pages/admin-guides/teleport-policy/integrations/aws-sync.mdx
+++ b/docs/pages/admin-guides/teleport-policy/integrations/aws-sync.mdx
@@ -60,12 +60,13 @@ graphical representation thereof.
## Prerequisites
- A running Teleport Enterprise cluster v14.3.9/v15.2.0 or later.
-- For self-hosted clusters, an updated `license.pem` with Teleport Policy enabled.
-- For self-hosted clusters, a running Access Graph node v1.17.0 or later.
-Check [Access Graph page](../teleport-policy.mdx) for details on
+- Teleport Policy enabled for your account.
+- For self-hosted clusters:
+ - Ensure that an up-to-date `license.pem` is used in the Auth Service configuration.
+ - A running Access Graph node v1.17.0 or later.
+Check the [Teleport Policy page](../teleport-policy.mdx) for details on
how to set up Access Graph.
-- The node running the Access Graph service must be reachable
-from Teleport Auth Service and Discovery Service.
+ - The node running the Access Graph service must be reachable from the Teleport Auth Service.
## Step 1/2. Configure Discovery Service (Self-hosted only)
diff --git a/docs/pages/admin-guides/teleport-policy/integrations/entra-id.mdx b/docs/pages/admin-guides/teleport-policy/integrations/entra-id.mdx
index 67d9736ed8ff2..da9b9e7feff9b 100644
--- a/docs/pages/admin-guides/teleport-policy/integrations/entra-id.mdx
+++ b/docs/pages/admin-guides/teleport-policy/integrations/entra-id.mdx
@@ -35,11 +35,12 @@ These resources are then visualized using the graph representation detailed in t
- A running Teleport Enterprise cluster v15.4.2/v16.0.0 or later.
- Teleport Identity and Teleport Policy enabled for your account.
- - For self-hosted clusters, ensure that an up-to-date `license.pem` is used in the Auth Service configuration.
-- For self-hosted clusters, a running Access Graph node v1.21.3 or later.
+- For self-hosted clusters:
+ - Ensure that an up-to-date `license.pem` is used in the Auth Service configuration.
+ - A running Access Graph node v1.21.3 or later.
Check the [Teleport Policy page](../teleport-policy.mdx) for details on
how to set up Access Graph.
-- The node running the Access Graph service must be reachable from the Teleport Auth Service.
+ - The node running the Access Graph service must be reachable from the Teleport Auth Service.
- Your user must have privileged administrator permissions in the Azure account
To verify that Access Graph is set up correctly for your cluster, sign in to the Teleport Web UI and navigate to the Management tab.
diff --git a/docs/pages/admin-guides/teleport-policy/integrations/gitlab.mdx b/docs/pages/admin-guides/teleport-policy/integrations/gitlab.mdx
index 83cc193507070..3a25ef7ad225f 100644
--- a/docs/pages/admin-guides/teleport-policy/integrations/gitlab.mdx
+++ b/docs/pages/admin-guides/teleport-policy/integrations/gitlab.mdx
@@ -46,13 +46,14 @@ graphical representation thereof.
## Prerequisites
- A running Teleport Enterprise cluster v14.3.20/v15.3.1/v16.0.0 or later.
-- For self-hosted clusters, an updated `license.pem` with Teleport Policy enabled.
-- For self-hosted clusters, a running Access Graph node v1.21.4 or later.
-Check [Access Graph page](../teleport-policy.mdx) for details on
-how to set up Access Graph.
-- For self-hosted clusters, the node running the Access Graph service must be reachable
-from Teleport Auth Service.
+- Teleport Policy enabled for your account.
- A GitLab instance running GitLab v9.0 or later.
+- For self-hosted clusters:
+ - Ensure that an up-to-date `license.pem` is used in the Auth Service configuration.
+ - A running Access Graph node v1.21.4 or later.
+Check the [Teleport Policy page](../teleport-policy.mdx) for details on
+how to set up Access Graph.
+ - The node running the Access Graph service must be reachable from the Teleport Auth Service.
## Step 1/3. Create GitLab token
diff --git a/docs/pages/admin-guides/teleport-policy/integrations/ssh-keys-scan.mdx b/docs/pages/admin-guides/teleport-policy/integrations/ssh-keys-scan.mdx
index 8aa1b8eac451a..8c50d3ad2da9d 100644
--- a/docs/pages/admin-guides/teleport-policy/integrations/ssh-keys-scan.mdx
+++ b/docs/pages/admin-guides/teleport-policy/integrations/ssh-keys-scan.mdx
@@ -70,15 +70,16 @@ It also never sends the private key path or any other sensitive information.
## Prerequisites
- A running Teleport Enterprise cluster v15.4.16/v16.2.0 or later.
-- For self-hosted clusters, an updated `license.pem` with Teleport Policy enabled.
-- For self-hosted clusters, a running Access Graph node v1.22.0 or later.
-Check [Access Graph page](../teleport-policy.mdx) for details on
-how to set up Access Graph.
-- For self-hosted clusters, the node running the Access Graph service must be reachable
-from Teleport Auth Service.
+- Teleport Policy enabled for your account.
- A Linux/macOS server running the Teleport SSH Service.
- Devices enrolled in the [Teleport Device Trust feature](../../access-controls/device-trust.mdx).
- For Jamf Pro integration, devices must be enrolled in Jamf Pro and have the signed `tsh` binary installed.
+- For self-hosted clusters:
+ - Ensure that an up-to-date `license.pem` is used in the Auth Service configuration.
+ - A running Access Graph node v1.22.0 or later.
+Check the [Teleport Policy page](../teleport-policy.mdx) for details on
+how to set up Access Graph.
+ - The node running the Access Graph service must be reachable from the Teleport Auth Service.
## Step 1/3. Enable SSH Key Scanning
diff --git a/e b/e
index 28fe9fa2d65f6..f8d01dcf9eca4 160000
--- a/e
+++ b/e
@@ -1 +1 @@
-Subproject commit 28fe9fa2d65f64f5d008653eae0c12781e94308b
+Subproject commit f8d01dcf9eca4bf6dcc4cf2f0edda47e7a7d6534
diff --git a/examples/chart/access/datadog/Chart.yaml b/examples/chart/access/datadog/Chart.yaml
index 91a197f6e006f..1fd6b740d047d 100644
--- a/examples/chart/access/datadog/Chart.yaml
+++ b/examples/chart/access/datadog/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-datadog
diff --git a/examples/chart/access/datadog/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/datadog/tests/__snapshot__/configmap_test.yaml.snap
index 884d6dc81aea1..14844c2d47f42 100644
--- a/examples/chart/access/datadog/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/datadog/tests/__snapshot__/configmap_test.yaml.snap
@@ -26,6 +26,6 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-datadog
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-datadog-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-datadog-16.4.5
name: RELEASE-NAME-teleport-plugin-datadog
diff --git a/examples/chart/access/datadog/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/datadog/tests/__snapshot__/deployment_test.yaml.snap
index 6208e834ed127..615b9e7e4c0fb 100644
--- a/examples/chart/access/datadog/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/datadog/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-datadog
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-datadog-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-datadog-16.4.5
name: RELEASE-NAME-teleport-plugin-datadog
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-datadog
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-datadog-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-datadog-16.4.5
spec:
containers:
- command:
diff --git a/examples/chart/access/discord/Chart.yaml b/examples/chart/access/discord/Chart.yaml
index c9afe79f4541a..c40016bb876af 100644
--- a/examples/chart/access/discord/Chart.yaml
+++ b/examples/chart/access/discord/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-discord
diff --git a/examples/chart/access/discord/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/discord/tests/__snapshot__/configmap_test.yaml.snap
index 34c07fbf60af8..dcf7708a32173 100644
--- a/examples/chart/access/discord/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/discord/tests/__snapshot__/configmap_test.yaml.snap
@@ -24,6 +24,6 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-discord
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-discord-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-discord-16.4.5
name: RELEASE-NAME-teleport-plugin-discord
diff --git a/examples/chart/access/discord/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/discord/tests/__snapshot__/deployment_test.yaml.snap
index a7880317c1b05..7de8af36bdadf 100644
--- a/examples/chart/access/discord/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/discord/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-discord
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-discord-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-discord-16.4.5
name: RELEASE-NAME-teleport-plugin-discord
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-discord
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-discord-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-discord-16.4.5
spec:
containers:
- command:
diff --git a/examples/chart/access/email/Chart.yaml b/examples/chart/access/email/Chart.yaml
index 25ddc808ec9f7..22335fa8b66dc 100644
--- a/examples/chart/access/email/Chart.yaml
+++ b/examples/chart/access/email/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-email
diff --git a/examples/chart/access/email/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/email/tests/__snapshot__/configmap_test.yaml.snap
index 8d004f33db686..852f4298a3b4a 100644
--- a/examples/chart/access/email/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/email/tests/__snapshot__/configmap_test.yaml.snap
@@ -26,8 +26,8 @@ should match the snapshot (mailgun on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
should match the snapshot (smtp on):
1: |
@@ -59,8 +59,8 @@ should match the snapshot (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
should match the snapshot (smtp on, no starttls):
1: |
@@ -92,8 +92,8 @@ should match the snapshot (smtp on, no starttls):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
should match the snapshot (smtp on, password file):
1: |
@@ -125,8 +125,8 @@ should match the snapshot (smtp on, password file):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
should match the snapshot (smtp on, roleToRecipients set):
1: |
@@ -161,8 +161,8 @@ should match the snapshot (smtp on, roleToRecipients set):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
should match the snapshot (smtp on, starttls disabled):
1: |
@@ -194,6 +194,6 @@ should match the snapshot (smtp on, starttls disabled):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
diff --git a/examples/chart/access/email/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/email/tests/__snapshot__/deployment_test.yaml.snap
index eb294236c0730..10eb822ee64ce 100644
--- a/examples/chart/access/email/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/email/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should be possible to override volume name (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
spec:
replicas: 1
@@ -22,8 +22,8 @@ should be possible to override volume name (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
spec:
containers:
- command:
@@ -34,7 +34,7 @@ should be possible to override volume name (smtp on):
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-email
ports:
@@ -75,8 +75,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
spec:
replicas: 1
@@ -90,8 +90,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
spec:
containers:
- command:
@@ -136,8 +136,8 @@ should match the snapshot (mailgun on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
spec:
replicas: 1
@@ -151,8 +151,8 @@ should match the snapshot (mailgun on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
spec:
containers:
- command:
@@ -163,7 +163,7 @@ should match the snapshot (mailgun on):
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-email
ports:
@@ -204,8 +204,8 @@ should match the snapshot (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
spec:
replicas: 1
@@ -219,8 +219,8 @@ should match the snapshot (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
spec:
containers:
- command:
@@ -231,7 +231,7 @@ should match the snapshot (smtp on):
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-email
ports:
@@ -272,8 +272,8 @@ should mount external secret (mailgun on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
spec:
replicas: 1
@@ -287,8 +287,8 @@ should mount external secret (mailgun on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
spec:
containers:
- command:
@@ -299,7 +299,7 @@ should mount external secret (mailgun on):
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-email
ports:
@@ -340,8 +340,8 @@ should mount external secret (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
name: RELEASE-NAME-teleport-plugin-email
spec:
replicas: 1
@@ -355,8 +355,8 @@ should mount external secret (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-email
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-email-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-email-16.4.5
spec:
containers:
- command:
@@ -367,7 +367,7 @@ should mount external secret (smtp on):
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-email:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-email
ports:
diff --git a/examples/chart/access/jira/Chart.yaml b/examples/chart/access/jira/Chart.yaml
index 17611bbce69c7..33a4b858d6ed7 100644
--- a/examples/chart/access/jira/Chart.yaml
+++ b/examples/chart/access/jira/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-jira
diff --git a/examples/chart/access/jira/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/jira/tests/__snapshot__/configmap_test.yaml.snap
index c56e7307264cd..7acd4c420d981 100644
--- a/examples/chart/access/jira/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/jira/tests/__snapshot__/configmap_test.yaml.snap
@@ -32,6 +32,6 @@ should match the snapshot (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-jira
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-jira-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-jira-16.4.5
name: RELEASE-NAME-teleport-plugin-jira
diff --git a/examples/chart/access/jira/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/jira/tests/__snapshot__/deployment_test.yaml.snap
index 2656310688132..b526c3864421f 100644
--- a/examples/chart/access/jira/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/jira/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-jira
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-jira-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-jira-16.4.5
name: RELEASE-NAME-teleport-plugin-jira
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-jira
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-jira-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-jira-16.4.5
spec:
containers:
- command:
diff --git a/examples/chart/access/mattermost/Chart.yaml b/examples/chart/access/mattermost/Chart.yaml
index c3ea6427e0bbb..1c427f532aad5 100644
--- a/examples/chart/access/mattermost/Chart.yaml
+++ b/examples/chart/access/mattermost/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-mattermost
diff --git a/examples/chart/access/mattermost/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/mattermost/tests/__snapshot__/configmap_test.yaml.snap
index 7a5b648dfe0a5..6557824214fda 100644
--- a/examples/chart/access/mattermost/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/mattermost/tests/__snapshot__/configmap_test.yaml.snap
@@ -22,6 +22,6 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
name: RELEASE-NAME-teleport-plugin-mattermost
diff --git a/examples/chart/access/mattermost/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/mattermost/tests/__snapshot__/deployment_test.yaml.snap
index ea6b16870de11..afb29c91c1083 100644
--- a/examples/chart/access/mattermost/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/mattermost/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
name: RELEASE-NAME-teleport-plugin-mattermost
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
spec:
containers:
- command:
@@ -75,8 +75,8 @@ should mount external secret:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
name: RELEASE-NAME-teleport-plugin-mattermost
spec:
replicas: 1
@@ -90,8 +90,8 @@ should mount external secret:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
spec:
containers:
- command:
@@ -102,7 +102,7 @@ should mount external secret:
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-mattermost:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-mattermost:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-mattermost
ports:
@@ -143,8 +143,8 @@ should override volume name:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
name: RELEASE-NAME-teleport-plugin-mattermost
spec:
replicas: 1
@@ -158,8 +158,8 @@ should override volume name:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-mattermost
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-mattermost-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-mattermost-16.4.5
spec:
containers:
- command:
@@ -170,7 +170,7 @@ should override volume name:
env:
- name: TELEPORT_PLUGIN_FAIL_FAST
value: "true"
- image: public.ecr.aws/gravitational/teleport-plugin-mattermost:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-mattermost:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-mattermost
ports:
diff --git a/examples/chart/access/msteams/Chart.yaml b/examples/chart/access/msteams/Chart.yaml
index 2a55c45f46fdb..0fa701eac07aa 100644
--- a/examples/chart/access/msteams/Chart.yaml
+++ b/examples/chart/access/msteams/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-msteams
diff --git a/examples/chart/access/msteams/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/msteams/tests/__snapshot__/configmap_test.yaml.snap
index 76047e0ff35ed..ed01961e32ca6 100644
--- a/examples/chart/access/msteams/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/msteams/tests/__snapshot__/configmap_test.yaml.snap
@@ -29,6 +29,6 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-msteams
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-msteams-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-msteams-16.4.5
name: RELEASE-NAME-teleport-plugin-msteams
diff --git a/examples/chart/access/msteams/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/msteams/tests/__snapshot__/deployment_test.yaml.snap
index d4102ed81fc77..b3008249a2d35 100644
--- a/examples/chart/access/msteams/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/msteams/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-msteams
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-msteams-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-msteams-16.4.5
name: RELEASE-NAME-teleport-plugin-msteams
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-msteams
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-msteams-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-msteams-16.4.5
spec:
containers:
- command:
diff --git a/examples/chart/access/pagerduty/Chart.yaml b/examples/chart/access/pagerduty/Chart.yaml
index f6ba37cad4b81..22f13d1a2ea93 100644
--- a/examples/chart/access/pagerduty/Chart.yaml
+++ b/examples/chart/access/pagerduty/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-pagerduty
diff --git a/examples/chart/access/pagerduty/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/pagerduty/tests/__snapshot__/configmap_test.yaml.snap
index d26dc91cf243e..e25d9300b7a0b 100644
--- a/examples/chart/access/pagerduty/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/pagerduty/tests/__snapshot__/configmap_test.yaml.snap
@@ -21,6 +21,6 @@ should match the snapshot (smtp on):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-pagerduty
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-pagerduty-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-pagerduty-16.4.5
name: RELEASE-NAME-teleport-plugin-pagerduty
diff --git a/examples/chart/access/pagerduty/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/pagerduty/tests/__snapshot__/deployment_test.yaml.snap
index 968c57e63550b..2292892ff0b7f 100644
--- a/examples/chart/access/pagerduty/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/pagerduty/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-pagerduty
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-pagerduty-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-pagerduty-16.4.5
name: RELEASE-NAME-teleport-plugin-pagerduty
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-pagerduty
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-pagerduty-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-pagerduty-16.4.5
spec:
containers:
- command:
diff --git a/examples/chart/access/slack/Chart.yaml b/examples/chart/access/slack/Chart.yaml
index 70716ada9064c..5fcd2d59f8201 100644
--- a/examples/chart/access/slack/Chart.yaml
+++ b/examples/chart/access/slack/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-slack
diff --git a/examples/chart/access/slack/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/access/slack/tests/__snapshot__/configmap_test.yaml.snap
index ca9f7f3727e76..8ec973128ba5d 100644
--- a/examples/chart/access/slack/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/access/slack/tests/__snapshot__/configmap_test.yaml.snap
@@ -24,6 +24,6 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-slack
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-slack-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-slack-16.4.5
name: RELEASE-NAME-teleport-plugin-slack
diff --git a/examples/chart/access/slack/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/access/slack/tests/__snapshot__/deployment_test.yaml.snap
index 07dc3182e2031..ccedde62c7f92 100644
--- a/examples/chart/access/slack/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/access/slack/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-slack
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-slack-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-slack-16.4.5
name: RELEASE-NAME-teleport-plugin-slack
spec:
replicas: 1
@@ -22,8 +22,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-slack
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-slack-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-slack-16.4.5
spec:
containers:
- command:
diff --git a/examples/chart/event-handler/Chart.yaml b/examples/chart/event-handler/Chart.yaml
index c04351a858ece..77dab8838ff9f 100644
--- a/examples/chart/event-handler/Chart.yaml
+++ b/examples/chart/event-handler/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
apiVersion: v2
name: teleport-plugin-event-handler
diff --git a/examples/chart/event-handler/tests/__snapshot__/configmap_test.yaml.snap b/examples/chart/event-handler/tests/__snapshot__/configmap_test.yaml.snap
index ac91f20648234..3a51fd3911d03 100644
--- a/examples/chart/event-handler/tests/__snapshot__/configmap_test.yaml.snap
+++ b/examples/chart/event-handler/tests/__snapshot__/configmap_test.yaml.snap
@@ -26,6 +26,6 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-event-handler
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-event-handler-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-event-handler-16.4.5
name: RELEASE-NAME-teleport-plugin-event-handler
diff --git a/examples/chart/event-handler/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/event-handler/tests/__snapshot__/deployment_test.yaml.snap
index 37de99ef0dd8d..d9ce01d2c3258 100644
--- a/examples/chart/event-handler/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/event-handler/tests/__snapshot__/deployment_test.yaml.snap
@@ -7,8 +7,8 @@ should match the snapshot:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-plugin-event-handler
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-plugin-event-handler-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-plugin-event-handler-16.4.5
name: RELEASE-NAME-teleport-plugin-event-handler
spec:
replicas: 1
@@ -82,7 +82,7 @@ should mount tls.existingCASecretName and set environment when set in values:
value: "true"
- name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- image: public.ecr.aws/gravitational/teleport-plugin-event-handler:16.4.3
+ image: public.ecr.aws/gravitational/teleport-plugin-event-handler:16.4.5
imagePullPolicy: IfNotPresent
name: teleport-plugin-event-handler
ports:
diff --git a/examples/chart/tbot/Chart.yaml b/examples/chart/tbot/Chart.yaml
index 1002c44587b11..5f13858fe4d69 100644
--- a/examples/chart/tbot/Chart.yaml
+++ b/examples/chart/tbot/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
name: tbot
apiVersion: v2
diff --git a/examples/chart/tbot/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/tbot/tests/__snapshot__/deployment_test.yaml.snap
index 6b8c933afb951..8be7873de2939 100644
--- a/examples/chart/tbot/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/tbot/tests/__snapshot__/deployment_test.yaml.snap
@@ -29,7 +29,7 @@ should match the snapshot (full):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tbot
- helm.sh/chart: tbot-16.4.3
+ helm.sh/chart: tbot-16.4.5
test-key: test-label-pod
spec:
affinity:
@@ -68,7 +68,7 @@ should match the snapshot (full):
value: "1"
- name: TEST_ENV
value: test-value
- image: public.ecr.aws/gravitational/tbot-distroless:16.4.3
+ image: public.ecr.aws/gravitational/tbot-distroless:16.4.5
imagePullPolicy: Always
livenessProbe:
failureThreshold: 6
@@ -154,7 +154,7 @@ should match the snapshot (simple):
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tbot
- helm.sh/chart: tbot-16.4.3
+ helm.sh/chart: tbot-16.4.5
spec:
containers:
- args:
@@ -176,7 +176,7 @@ should match the snapshot (simple):
fieldPath: spec.nodeName
- name: KUBERNETES_TOKEN_PATH
value: /var/run/secrets/tokens/join-sa-token
- image: public.ecr.aws/gravitational/tbot-distroless:16.4.3
+ image: public.ecr.aws/gravitational/tbot-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
diff --git a/examples/chart/teleport-cluster/Chart.yaml b/examples/chart/teleport-cluster/Chart.yaml
index ad0f293d374bf..ef2767f428834 100644
--- a/examples/chart/teleport-cluster/Chart.yaml
+++ b/examples/chart/teleport-cluster/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
name: teleport-cluster
apiVersion: v2
diff --git a/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml b/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml
index 320c36d1b88dd..c8f7c1f17e292 100644
--- a/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml
+++ b/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
name: teleport-operator
apiVersion: v2
diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap
index 4a3638bc59e28..32adb901e155a 100644
--- a/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap
+++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap
@@ -8,8 +8,8 @@ adds operator permissions to ClusterRole:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-cluster
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-cluster-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-cluster-16.4.5
teleport.dev/majorVersion: "16"
name: RELEASE-NAME
rules:
diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap
index 8a2ec59d9e611..c9ba2b5e6352e 100644
--- a/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap
+++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap
@@ -1848,8 +1848,8 @@ sets clusterDomain on Configmap:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-cluster
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-cluster-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-cluster-16.4.5
teleport.dev/majorVersion: "16"
name: RELEASE-NAME-auth
namespace: NAMESPACE
diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap
index 79a1dfa3af43f..e149f940e958a 100644
--- a/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap
+++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap
@@ -8,7 +8,7 @@
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -141,7 +141,7 @@ should set nodeSelector when set in values:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -238,7 +238,7 @@ should set resources when set in values:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -324,7 +324,7 @@ should set securityContext when set in values:
- args:
- --diag-addr=0.0.0.0:3000
- --apply-on-startup=/etc/teleport/apply-on-startup.yaml
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap
index ba80504a0fade..59dbc6c98fe34 100644
--- a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap
+++ b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_config_test.yaml.snap
@@ -567,8 +567,8 @@ sets clusterDomain on Configmap:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-cluster
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-cluster-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-cluster-16.4.5
teleport.dev/majorVersion: "16"
name: RELEASE-NAME-proxy
namespace: NAMESPACE
diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap
index 55570c40da5a9..8b39ce82dd90f 100644
--- a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap
+++ b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap
@@ -11,8 +11,8 @@ sets clusterDomain on Deployment Pods:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-cluster
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-cluster-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-cluster-16.4.5
teleport.dev/majorVersion: "16"
name: RELEASE-NAME-proxy
namespace: NAMESPACE
@@ -26,7 +26,7 @@ sets clusterDomain on Deployment Pods:
template:
metadata:
annotations:
- checksum/config: de39cc585d9931b2ab4483f5bcb595f92e12ea58a26f1ed39ca6d1be72194645
+ checksum/config: ef79a9655f3a6fb231ba72db9f1de214375920c410a80eacb6b99816e4d7b670
kubernetes.io/pod: test-annotation
kubernetes.io/pod-different: 4
labels:
@@ -34,8 +34,8 @@ sets clusterDomain on Deployment Pods:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: teleport-cluster
- app.kubernetes.io/version: 16.4.3
- helm.sh/chart: teleport-cluster-16.4.3
+ app.kubernetes.io/version: 16.4.5
+ helm.sh/chart: teleport-cluster-16.4.5
teleport.dev/majorVersion: "16"
spec:
affinity:
@@ -44,7 +44,7 @@ sets clusterDomain on Deployment Pods:
containers:
- args:
- --diag-addr=0.0.0.0:3000
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -105,7 +105,7 @@ sets clusterDomain on Deployment Pods:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.test.com
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
serviceAccountName: RELEASE-NAME-proxy
terminationGracePeriodSeconds: 60
@@ -137,7 +137,7 @@ should provision initContainer correctly when set in values:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
resources:
limits:
@@ -201,7 +201,7 @@ should set nodeSelector when set in values:
containers:
- args:
- --diag-addr=0.0.0.0:3000
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -262,7 +262,7 @@ should set nodeSelector when set in values:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
nodeSelector:
environment: security
@@ -313,7 +313,7 @@ should set resources for wait-auth-update initContainer when set in values:
containers:
- args:
- --diag-addr=0.0.0.0:3000
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -381,7 +381,7 @@ should set resources for wait-auth-update initContainer when set in values:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
resources:
limits:
@@ -421,7 +421,7 @@ should set resources when set in values:
containers:
- args:
- --diag-addr=0.0.0.0:3000
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -489,7 +489,7 @@ should set resources when set in values:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
resources:
limits:
@@ -529,7 +529,7 @@ should set securityContext for initContainers when set in values:
containers:
- args:
- --diag-addr=0.0.0.0:3000
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -597,7 +597,7 @@ should set securityContext for initContainers when set in values:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
securityContext:
allowPrivilegeEscalation: false
@@ -637,7 +637,7 @@ should set securityContext when set in values:
containers:
- args:
- --diag-addr=0.0.0.0:3000
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -705,7 +705,7 @@ should set securityContext when set in values:
- wait
- no-resolve
- RELEASE-NAME-auth-v15.NAMESPACE.svc.cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
name: wait-auth-update
securityContext:
allowPrivilegeEscalation: false
diff --git a/examples/chart/teleport-kube-agent/Chart.yaml b/examples/chart/teleport-kube-agent/Chart.yaml
index ffbe1c28b903f..cde1623d10d9c 100644
--- a/examples/chart/teleport-kube-agent/Chart.yaml
+++ b/examples/chart/teleport-kube-agent/Chart.yaml
@@ -1,4 +1,4 @@
-.version: &version "16.4.3"
+.version: &version "16.4.5"
name: teleport-kube-agent
apiVersion: v2
diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap
index 3c07602635438..1daa2ed24ea12 100644
--- a/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap
+++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap
@@ -32,7 +32,7 @@ sets Deployment annotations when specified if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -109,7 +109,7 @@ sets Deployment labels when specified if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -173,7 +173,7 @@ sets Pod annotations when specified if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -237,7 +237,7 @@ sets Pod labels when specified if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -322,7 +322,7 @@ should add emptyDir for data when existingDataVolume is not set if action is Upg
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -387,7 +387,7 @@ should add insecureSkipProxyTLSVerify to args when set in values if action is Up
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -451,7 +451,7 @@ should correctly configure existingDataVolume when set if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -513,7 +513,7 @@ should expose diag port if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -589,7 +589,7 @@ should have multiple replicas when replicaCount is set (using .replicaCount, dep
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -665,7 +665,7 @@ should have multiple replicas when replicaCount is set (using highAvailability.r
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -729,7 +729,7 @@ should have one replica when replicaCount is not set if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -793,7 +793,7 @@ should mount extraVolumes and extraVolumeMounts if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -862,7 +862,7 @@ should mount jamfCredentialsSecret if it already exists and when role is jamf an
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -932,7 +932,7 @@ should mount jamfCredentialsSecret.name when role is jamf and action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1004,7 +1004,7 @@ should mount tls.existingCASecretName and set environment when set in values if
value: cluster.local
- name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1078,7 +1078,7 @@ should mount tls.existingCASecretName and set extra environment when set in valu
value: http://username:password@my.proxy.host:3128
- name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1148,7 +1148,7 @@ should provision initContainer correctly when set in values if action is Upgrade
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1270,7 +1270,7 @@ should set affinity when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1334,7 +1334,7 @@ should set default serviceAccountName when not set in values if action is Upgrad
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1411,7 +1411,7 @@ should set environment when extraEnv set in values if action is Upgrade:
value: cluster.local
- name: HTTPS_PROXY
value: http://username:password@my.proxy.host:3128
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1539,7 +1539,7 @@ should set imagePullPolicy when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: Always
livenessProbe:
failureThreshold: 6
@@ -1603,7 +1603,7 @@ should set nodeSelector if set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1669,7 +1669,7 @@ should set not set priorityClassName when not set in values if action is Upgrade
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1745,7 +1745,7 @@ should set preferred affinity when more than one replica is used if action is Up
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1809,7 +1809,7 @@ should set priorityClassName when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1874,7 +1874,7 @@ should set probeTimeoutSeconds when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1948,7 +1948,7 @@ should set required affinity when highAvailability.requireAntiAffinity is set if
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2012,7 +2012,7 @@ should set resources when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2083,7 +2083,7 @@ should set serviceAccountName when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2147,7 +2147,7 @@ should set tolerations when set in values if action is Upgrade:
value: "true"
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap
index 225edfa2ef2cb..55d353a172149 100644
--- a/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap
+++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap
@@ -25,7 +25,7 @@ should create ServiceAccount for post-delete hook by default:
fieldPath: metadata.namespace
- name: RELEASE_NAME
value: RELEASE-NAME
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
name: post-delete-job
securityContext:
@@ -108,7 +108,7 @@ should not create ServiceAccount for post-delete hook if serviceAccount.create i
fieldPath: metadata.namespace
- name: RELEASE_NAME
value: RELEASE-NAME
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
name: post-delete-job
securityContext:
@@ -138,7 +138,7 @@ should not create ServiceAccount, Role or RoleBinding for post-delete hook if se
fieldPath: metadata.namespace
- name: RELEASE_NAME
value: RELEASE-NAME
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
name: post-delete-job
securityContext:
@@ -168,7 +168,7 @@ should set nodeSelector in post-delete hook:
fieldPath: metadata.namespace
- name: RELEASE_NAME
value: RELEASE-NAME
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
name: post-delete-job
securityContext:
@@ -200,7 +200,7 @@ should set resources in the Job's pod spec if resources is set in values:
fieldPath: metadata.namespace
- name: RELEASE_NAME
value: RELEASE-NAME
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
name: post-delete-job
resources:
diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap
index 1a122dbddce75..7f59b4780c183 100644
--- a/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap
+++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap
@@ -18,7 +18,7 @@ sets Pod annotations when specified:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -90,7 +90,7 @@ sets Pod labels when specified:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -186,7 +186,7 @@ sets StatefulSet labels when specified:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -290,7 +290,7 @@ should add insecureSkipProxyTLSVerify to args when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -362,7 +362,7 @@ should add volumeClaimTemplate for data volume when using StatefulSet and action
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -454,7 +454,7 @@ should add volumeClaimTemplate for data volume when using StatefulSet and is Fre
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -536,7 +536,7 @@ should add volumeMount for data volume when using StatefulSet:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -608,7 +608,7 @@ should expose diag port:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -680,7 +680,7 @@ should generate Statefulset when storage is disabled and mode is a Upgrade:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -766,7 +766,7 @@ should have multiple replicas when replicaCount is set (using .replicaCount, dep
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -850,7 +850,7 @@ should have multiple replicas when replicaCount is set (using highAvailability.r
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -922,7 +922,7 @@ should have one replica when replicaCount is not set:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -994,7 +994,7 @@ should install Statefulset when storage is disabled and mode is a Fresh Install:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1068,7 +1068,7 @@ should mount extraVolumes and extraVolumeMounts:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1145,7 +1145,7 @@ should mount jamfCredentialsSecret if it already exists and when role is jamf:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1225,7 +1225,7 @@ should mount jamfCredentialsSecret.name when role is jamf:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1307,7 +1307,7 @@ should mount tls.existingCASecretName and set environment when set in values:
value: cluster.local
- name: SSL_CERT_FILE
value: /etc/teleport-tls-ca/ca.pem
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1391,7 +1391,7 @@ should mount tls.existingCASecretName and set extra environment when set in valu
value: /etc/teleport-tls-ca/ca.pem
- name: HTTPS_PROXY
value: http://username:password@my.proxy.host:3128
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1471,7 +1471,7 @@ should not add emptyDir for data when using StatefulSet:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1543,7 +1543,7 @@ should provision initContainer correctly when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1673,7 +1673,7 @@ should set affinity when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1745,7 +1745,7 @@ should set default serviceAccountName when not set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1830,7 +1830,7 @@ should set environment when extraEnv set in values:
value: cluster.local
- name: HTTPS_PROXY
value: http://username:password@my.proxy.host:3128
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -1974,7 +1974,7 @@ should set imagePullPolicy when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: Always
livenessProbe:
failureThreshold: 6
@@ -2046,7 +2046,7 @@ should set nodeSelector if set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2132,7 +2132,7 @@ should set preferred affinity when more than one replica is used:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2204,7 +2204,7 @@ should set probeTimeoutSeconds when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2286,7 +2286,7 @@ should set required affinity when highAvailability.requireAntiAffinity is set:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2358,7 +2358,7 @@ should set resources when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2437,7 +2437,7 @@ should set serviceAccountName when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2509,7 +2509,7 @@ should set storage.requests when set in values and action is an Upgrade:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2581,7 +2581,7 @@ should set storage.storageClassName when set in values and action is an Upgrade:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -2653,7 +2653,7 @@ should set tolerations when set in values:
value: RELEASE-NAME
- name: TELEPORT_KUBE_CLUSTER_DOMAIN
value: cluster.local
- image: public.ecr.aws/gravitational/teleport-distroless:16.4.3
+ image: public.ecr.aws/gravitational/teleport-distroless:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap
index 7ce36f78ff50c..04cc6c8d3d73a 100644
--- a/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap
+++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap
@@ -27,7 +27,7 @@ sets the affinity:
- --base-image=public.ecr.aws/gravitational/teleport-distroless
- --version-server=https://my-custom-version-server/v1
- --version-channel=custom/preview
- image: public.ecr.aws/gravitational/teleport-kube-agent-updater:16.4.3
+ image: public.ecr.aws/gravitational/teleport-kube-agent-updater:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
@@ -73,7 +73,7 @@ sets the tolerations:
- --base-image=public.ecr.aws/gravitational/teleport-distroless
- --version-server=https://my-custom-version-server/v1
- --version-channel=custom/preview
- image: public.ecr.aws/gravitational/teleport-kube-agent-updater:16.4.3
+ image: public.ecr.aws/gravitational/teleport-kube-agent-updater:16.4.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
diff --git a/integration/hsm/hsm_test.go b/integration/hsm/hsm_test.go
index 5c7427b06a148..935a8709e5dfb 100644
--- a/integration/hsm/hsm_test.go
+++ b/integration/hsm/hsm_test.go
@@ -677,7 +677,10 @@ func TestHSMRevert(t *testing.T) {
clock.Advance(2 * defaults.HighResPollingPeriod)
assert.EventuallyWithT(t, func(t *assert.CollectT) {
alerts, err = auth1.process.GetAuthServer().GetClusterAlerts(ctx, types.GetClusterAlertsRequest{})
- require.NoError(t, err)
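+ // Use assert rather than require inside EventuallyWithT: require is not
+ // supported with *assert.CollectT and would abort the poll instead of
+ // letting the condition be retried.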
+ assert.NoError(t, err)
assert.Empty(t, alerts)
+
+ // Keep advancing the clock to make sure the rotation ticker gets fired
+ clock.Advance(2 * defaults.HighResPollingPeriod)
}, 5*time.Second, 100*time.Millisecond)
}
diff --git a/lib/auth/auth.go b/lib/auth/auth.go
index 09cd9127a3a91..540df63049468 100644
--- a/lib/auth/auth.go
+++ b/lib/auth/auth.go
@@ -722,6 +722,14 @@ var (
},
)
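+ // roleCount reports the total number of roles in the cluster. It is
+ // refreshed periodically by tallyRoles and exported as teleport_roles_total.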
+ roleCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: teleport.MetricNamespace,
+ Name: "roles_total",
+ Help: "Number of roles that exist in the cluster",
+ },
+ )
+
registeredAgents = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: teleport.MetricNamespace,
@@ -806,6 +814,7 @@ var (
accessRequestsCreatedMetric,
registeredAgentsInstallMethod,
userCertificatesGeneratedMetric,
+ roleCount,
}
)
@@ -1269,87 +1278,115 @@ func (a *Server) periodicSyncUpgradeWindowStartHour() {
}
}
+// periodicIntervalKey uniquely identifies the sub-intervals registered with
+// the interval.MultiInterval instance that we use for managing periodic operations.
+type periodicIntervalKey int
+
+const (
+ heartbeatCheckKey periodicIntervalKey = 1 + iota
+ rotationCheckKey
+ metricsKey
+ releaseCheckKey
+ localReleaseCheckKey
+ instancePeriodicsKey
+ dynamicLabelsCheckKey
+ desktopCheckKey
+ upgradeWindowCheckKey
+ roleCountKey
+)
+
// runPeriodicOperations runs some periodic bookkeeping operations
// performed by auth server
func (a *Server) runPeriodicOperations() {
- ctx := context.TODO()
+ firstReleaseCheck := utils.FullJitter(time.Hour * 6)
+
+ // this environment variable is "unstable" since it will be deprecated
+ // by an upcoming tctl command. currently exists for testing purposes only.
+ if os.Getenv("TELEPORT_UNSTABLE_VC_SYNC_ON_START") == "yes" {
+ firstReleaseCheck = utils.HalfJitter(time.Second * 10)
+ }
+
// run periodic functions with a semi-random period
// to avoid contention on the database in case if there are multiple
// auth servers running - so they don't compete trying
// to update the same resources.
r := insecurerand.New(insecurerand.NewSource(a.GetClock().Now().UnixNano()))
period := defaults.HighResPollingPeriod + time.Duration(r.Intn(int(defaults.HighResPollingPeriod/time.Second)))*time.Second
- log.Debugf("Ticking with period: %v.", period)
- a.lock.RLock()
- ticker := a.clock.NewTicker(period)
- a.lock.RUnlock()
- // Create a ticker with jitter
- heartbeatCheckTicker := interval.New(interval.Config{
- Duration: apidefaults.ServerKeepAliveTTL() * 2,
- Jitter: retryutils.NewSeventhJitter(),
- })
- promTicker := interval.New(interval.Config{
- FirstDuration: 5 * time.Second,
- Duration: defaults.PrometheusScrapeInterval,
- Jitter: retryutils.NewSeventhJitter(),
- })
- missedKeepAliveCount := 0
- defer ticker.Stop()
- defer heartbeatCheckTicker.Stop()
- defer promTicker.Stop()
- firstReleaseCheck := utils.FullJitter(time.Hour * 6)
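+ // Consolidate the previous assortment of standalone tickers into a single
+ // MultiInterval; each periodic task keeps its own period and jitter under
+ // a periodicIntervalKey sub-interval.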
+ ticker := interval.NewMulti(
+ a.GetClock(),
+ interval.SubInterval[periodicIntervalKey]{
+ Key: rotationCheckKey,
+ Duration: period,
+ },
+ interval.SubInterval[periodicIntervalKey]{
+ Key: metricsKey,
+ Duration: defaults.PrometheusScrapeInterval,
+ FirstDuration: 5 * time.Second,
+ Jitter: retryutils.NewSeventhJitter(),
+ },
+ interval.SubInterval[periodicIntervalKey]{
+ Key: instancePeriodicsKey,
+ Duration: 9 * time.Minute,
+ FirstDuration: utils.HalfJitter(time.Minute),
+ Jitter: retryutils.NewSeventhJitter(),
+ },
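+ // Tally roles shortly after startup, then refresh the count every 12 hours.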
+ interval.SubInterval[periodicIntervalKey]{
+ Key: roleCountKey,
+ Duration: 12 * time.Hour,
+ FirstDuration: utils.FullJitter(time.Minute),
+ Jitter: retryutils.NewSeventhJitter(),
+ },
+ )
- // this environment variable is "unstable" since it will be deprecated
- // by an upcoming tctl command. currently exists for testing purposes only.
- if os.Getenv("TELEPORT_UNSTABLE_VC_SYNC_ON_START") == "yes" {
- firstReleaseCheck = utils.HalfJitter(time.Second * 10)
- }
+ defer ticker.Stop()
- // note the use of FullJitter for the releases check interval. this lets us ensure
- // that frequent restarts don't prevent checks from happening despite the infrequent
- // effective check rate.
- releaseCheck := interval.New(interval.Config{
- Duration: time.Hour * 24,
- FirstDuration: firstReleaseCheck,
- Jitter: retryutils.NewFullJitter(),
- })
- defer releaseCheck.Stop()
-
- // more frequent release check that just re-calculates alerts based on previously
- // pulled versioning info.
- localReleaseCheck := interval.New(interval.Config{
- Duration: time.Minute * 10,
- FirstDuration: utils.HalfJitter(time.Second * 10),
- Jitter: retryutils.NewHalfJitter(),
- })
- defer localReleaseCheck.Stop()
+ missedKeepAliveCount := 0
- instancePeriodics := interval.New(interval.Config{
- Duration: time.Minute * 9,
- FirstDuration: utils.HalfJitter(time.Minute),
- Jitter: retryutils.NewSeventhJitter(),
- })
- defer instancePeriodics.Stop()
+ // Prevent some periodic operations from running for dashboard tenants.
+ if !services.IsDashboard(*modules.GetModules().Features().ToProto()) {
+ ticker.Push(interval.SubInterval[periodicIntervalKey]{
+ Key: dynamicLabelsCheckKey,
+ Duration: dynamicLabelCheckPeriod,
+ FirstDuration: utils.HalfJitter(10 * time.Second),
+ Jitter: retryutils.NewSeventhJitter(),
+ })
+ ticker.Push(interval.SubInterval[periodicIntervalKey]{
+ Key: heartbeatCheckKey,
+ Duration: apidefaults.ServerKeepAliveTTL() * 2,
+ Jitter: retryutils.NewSeventhJitter(),
+ })
+ ticker.Push(interval.SubInterval[periodicIntervalKey]{
+ Key: releaseCheckKey,
+ Duration: 24 * time.Hour,
+ FirstDuration: firstReleaseCheck,
+ // note the use of FullJitter for the releases check interval. this lets us ensure
+ // that frequent restarts don't prevent checks from happening despite the infrequent
+ // effective check rate.
+ Jitter: retryutils.NewFullJitter(),
+ })
+ // more frequent release check that just re-calculates alerts based on previously
+ // pulled versioning info.
+ ticker.Push(interval.SubInterval[periodicIntervalKey]{
+ Key: localReleaseCheckKey,
+ Duration: 10 * time.Minute,
+ FirstDuration: utils.HalfJitter(10 * time.Second),
+ Jitter: retryutils.NewHalfJitter(),
+ })
+ }
- var ossDesktopsCheck <-chan time.Time
if modules.GetModules().IsOSSBuild() {
- ossDesktopsCheck = interval.New(interval.Config{
+ ticker.Push(interval.SubInterval[periodicIntervalKey]{
+ Key: desktopCheckKey,
Duration: OSSDesktopsCheckPeriod,
- FirstDuration: utils.HalfJitter(time.Second * 10),
+ FirstDuration: utils.HalfJitter(10 * time.Second),
Jitter: retryutils.NewHalfJitter(),
- }).Next()
- } else if err := a.DeleteClusterAlert(ctx, OSSDesktopsAlertID); err != nil && !trace.IsNotFound(err) {
+ })
+ } else if err := a.DeleteClusterAlert(a.closeCtx, OSSDesktopsAlertID); err != nil && !trace.IsNotFound(err) {
log.Warnf("Can't delete OSS non-AD desktops limit alert: %v", err)
}
- dynamicLabelsCheck := interval.New(interval.Config{
- Duration: dynamicLabelCheckPeriod,
- FirstDuration: utils.HalfJitter(time.Second * 10),
- Jitter: retryutils.NewSeventhJitter(),
- })
- defer dynamicLabelsCheck.Stop()
-
// isolate the schedule of potentially long-running refreshRemoteClusters() from other tasks
go func() {
// reasonably small interval to ensure that users observe clusters as online within 1 minute of adding them.
@@ -1364,7 +1401,7 @@ func (a *Server) runPeriodicOperations() {
case <-a.closeCtx.Done():
return
case <-remoteClustersRefresh.Next():
- a.refreshRemoteClusters(ctx, r)
+ a.refreshRemoteClusters(a.closeCtx, r)
}
}
}()
@@ -1372,60 +1409,118 @@ func (a *Server) runPeriodicOperations() {
// cloud auth servers need to periodically sync the upgrade window
// from the cloud db.
if modules.GetModules().Features().Cloud {
- go a.periodicSyncUpgradeWindowStartHour()
- }
-
- // disable periodics that are not required for cloud dashboard tenants
- if services.IsDashboard(*modules.GetModules().Features().ToProto()) {
- releaseCheck.Stop()
- localReleaseCheck.Stop()
- heartbeatCheckTicker.Stop()
- dynamicLabelsCheck.Stop()
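+ // The upgrade window sync now runs as a sub-interval on the shared ticker
+ // instead of a dedicated goroutine.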
+ ticker.Push(interval.SubInterval[periodicIntervalKey]{
+ Key: upgradeWindowCheckKey,
+ Duration: 3 * time.Minute,
+ FirstDuration: utils.FullJitter(30 * time.Second),
+ Jitter: retryutils.NewSeventhJitter(),
+ })
}
for {
select {
case <-a.closeCtx.Done():
return
- case <-ticker.Chan():
- err := a.autoRotateCertAuthorities(ctx)
- if err != nil {
- if trace.IsCompareFailed(err) {
- log.Debugf("Cert authority has been updated concurrently: %v.", err)
- } else {
- log.Errorf("Failed to perform cert rotation check: %v.", err)
- }
- }
- case <-heartbeatCheckTicker.Next():
- nodes, err := a.GetNodes(ctx, apidefaults.Namespace)
- if err != nil {
- log.Errorf("Failed to load nodes for heartbeat metric calculation: %v", err)
- }
- for _, node := range nodes {
- if services.NodeHasMissedKeepAlives(node) {
- missedKeepAliveCount++
- }
+ case tick := <-ticker.Next():
+ switch tick.Key {
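+ // Each handler runs in its own goroutine so that one slow task cannot
+ // delay the other sub-intervals sharing this ticker.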
+ case rotationCheckKey:
+ go func() {
+ if err := a.autoRotateCertAuthorities(a.closeCtx); err != nil {
+ if trace.IsCompareFailed(err) {
+ log.Debugf("Cert authority has been updated concurrently: %v.", err)
+ } else {
+ log.Errorf("Failed to perform cert rotation check: %v.", err)
+ }
+ }
+ }()
+ case heartbeatCheckKey:
+ go func() {
+ req := &proto.ListUnifiedResourcesRequest{Kinds: []string{types.KindNode}, SortBy: types.SortBy{Field: types.ResourceKind}}
+
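+ // Page through the unified resource cache rather than loading all nodes
+ // at once, counting servers that have missed keepalives as we go.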
+ for {
+ _, next, err := a.UnifiedResourceCache.IterateUnifiedResources(a.closeCtx,
+ func(rwl types.ResourceWithLabels) (bool, error) {
+ srv, ok := rwl.(types.Server)
+ if !ok {
+ return false, nil
+ }
+ if services.NodeHasMissedKeepAlives(srv) {
+ missedKeepAliveCount++
+ }
+ return false, nil
+ },
+ req,
+ )
+ if err != nil {
+ log.Errorf("Failed to load nodes for heartbeat metric calculation: %v", err)
+ return
+ }
+
+ req.StartKey = next
+ if req.StartKey == "" {
+ break
+ }
+ }
+
+ // Update prometheus gauge
+ heartbeatsMissedByAuth.Set(float64(missedKeepAliveCount))
+ }()
+ case metricsKey:
+ go a.updateAgentMetrics()
+ case releaseCheckKey:
+ go a.syncReleaseAlerts(a.closeCtx, true)
+ case localReleaseCheckKey:
+ go a.syncReleaseAlerts(a.closeCtx, false)
+ case instancePeriodicsKey:
+ go a.doInstancePeriodics(a.closeCtx)
+ case desktopCheckKey:
+ go a.syncDesktopsLimitAlert(a.closeCtx)
+ case dynamicLabelsCheckKey:
+ go a.syncDynamicLabelsAlert(a.closeCtx)
+ case upgradeWindowCheckKey:
+ go a.periodicSyncUpgradeWindowStartHour()
+ case roleCountKey:
+ go a.tallyRoles(a.closeCtx)
}
- // Update prometheus gauge
- heartbeatsMissedByAuth.Set(float64(missedKeepAliveCount))
- case <-promTicker.Next():
- a.updateAgentMetrics()
- case <-releaseCheck.Next():
- a.syncReleaseAlerts(ctx, true)
- case <-localReleaseCheck.Next():
- a.syncReleaseAlerts(ctx, false)
- case <-instancePeriodics.Next():
- // instance periodics are rate-limited and may be time-consuming in large
- // clusters, so launch them in the background.
- go a.doInstancePeriodics(ctx)
- case <-ossDesktopsCheck:
- a.syncDesktopsLimitAlert(ctx)
- case <-dynamicLabelsCheck.Next():
- a.syncDynamicLabelsAlert(ctx)
}
}
}
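+
+// tallyRoles pages through every role in the cache and publishes the total
+// to the roleCount gauge (exported as teleport_roles_total).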
+func (a *Server) tallyRoles(ctx context.Context) {
+ count := 0
+ log.Debug("tallying roles")
+ defer func() {
+ log.Debugf("tallying roles completed, role_count=%d", count)
+ }()
+
+ req := &proto.ListRolesRequest{Limit: 20}
+
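+ // Space out page reads so counting roles cannot cause a read spike on
+ // large clusters.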
+ readLimiter := time.NewTicker(20 * time.Millisecond)
+ defer readLimiter.Stop()
+
+ for {
+ resp, err := a.Cache.ListRoles(ctx, req)
+ if err != nil {
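+ // Bail out quietly; the deferred debug log reports the partial count and
+ // the next tick will retry.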
+ return
+ }
+
+ count += len(resp.Roles)
+ req.StartKey = resp.NextKey
+
+ if req.StartKey == "" {
+ break
+ }
+
+ select {
+ case <-readLimiter.C:
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ roleCount.Set(float64(count))
+}
+
func (a *Server) doInstancePeriodics(ctx context.Context) {
const slowRate = time.Millisecond * 200 // 5 reads per second
const fastRate = time.Millisecond * 5 // 200 reads per second
diff --git a/lib/inventory/controller.go b/lib/inventory/controller.go
index 61d9da851bc16..ae5258cf97630 100644
--- a/lib/inventory/controller.go
+++ b/lib/inventory/controller.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/gravitational/trace"
+ "github.com/jonboulle/clockwork"
log "github.com/sirupsen/logrus"
"github.com/gravitational/teleport/api/client"
@@ -262,12 +263,14 @@ func (c *Controller) RegisterControlStream(stream client.UpstreamInventoryContro
// as much as possible. this is intended to mitigate load spikes on auth restart, and is reasonably
// safe to do since the instance resource is not directly relied upon for use of any particular teleport
// service.
- ticker := interval.NewMulti(interval.SubInterval[intervalKey]{
- Key: instanceHeartbeatKey,
- VariableDuration: c.instanceHBVariableDuration,
- FirstDuration: fullJitter(c.instanceHBVariableDuration.Duration()),
- Jitter: seventhJitter,
- })
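+ // NewMulti now takes an explicit clock as its first argument; production
+ // callers pass the real clock, while tests may inject a fake one.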
+ ticker := interval.NewMulti(
+ clockwork.NewRealClock(),
+ interval.SubInterval[intervalKey]{
+ Key: instanceHeartbeatKey,
+ VariableDuration: c.instanceHBVariableDuration,
+ FirstDuration: fullJitter(c.instanceHBVariableDuration.Duration()),
+ Jitter: seventhJitter,
+ })
handle := newUpstreamHandle(stream, hello, ticker)
c.store.Insert(handle)
go c.handleControlStream(handle)
diff --git a/lib/utils/interval/multi.go b/lib/utils/interval/multi.go
index 9932203ea6076..f3c1fae80d79f 100644
--- a/lib/utils/interval/multi.go
+++ b/lib/utils/interval/multi.go
@@ -23,6 +23,8 @@ import (
"sync"
"time"
+ "github.com/jonboulle/clockwork"
+
"github.com/gravitational/teleport/api/utils/retryutils"
)
@@ -39,6 +41,7 @@ import (
// but it is still a potential source of bugs/confusion when transitioning to using this type from one
// of the single-interval alternatives.
type MultiInterval[T comparable] struct {
+ clock clockwork.Clock
subs []subIntervalEntry[T]
push chan subIntervalEntry[T]
ch chan Tick[T]
@@ -125,12 +128,17 @@ func (s *subIntervalEntry[T]) increment() {
// NewMulti creates a new multi-interval instance. This function panics on non-positive
// interval durations (equivalent to time.NewTicker) or if no sub-intervals are provided.
-func NewMulti[T comparable](intervals ...SubInterval[T]) *MultiInterval[T] {
+func NewMulti[T comparable](clock clockwork.Clock, intervals ...SubInterval[T]) *MultiInterval[T] {
if len(intervals) == 0 {
panic(errors.New("empty sub-interval set for interval.NewMulti"))
}
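+ // A nil clock falls back to the real clock so existing callers keep their
+ // behavior; tests can pass e.g. clockwork.NewFakeClock() to drive ticks
+ // deterministically.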
+ if clock == nil {
+ clock = clockwork.NewRealClock()
+ }
+
interval := &MultiInterval[T]{
+ clock: clock,
subs: make([]subIntervalEntry[T], 0, len(intervals)),
push: make(chan subIntervalEntry[T]),
ch: make(chan Tick[T], 1),
@@ -140,7 +148,7 @@ func NewMulti[T comparable](intervals ...SubInterval[T]) *MultiInterval[T] {
}
// check and initialize our sub-intervals.
- now := time.Now()
+ now := clock.Now()
for _, sub := range intervals {
if sub.Duration <= 0 && (sub.VariableDuration == nil || sub.VariableDuration.Duration() <= 0) {
panic(errors.New("non-positive sub interval for interval.NewMulti"))
@@ -156,7 +164,7 @@ func NewMulti[T comparable](intervals ...SubInterval[T]) *MultiInterval[T] {
// start the timer in this goroutine to improve
// consistency of first tick.
- timer := time.NewTimer(d)
+ timer := clock.NewTimer(d)
go interval.run(timer, key)
@@ -173,7 +181,7 @@ func (i *MultiInterval[T]) Push(sub SubInterval[T]) {
SubInterval: sub,
}
// we initialize here in order to improve consistency of start time
- entry.init(time.Now())
+ entry.init(i.clock.Now())
select {
case i.push <- entry:
case <-i.done:
@@ -257,7 +265,7 @@ func (i *MultiInterval[T]) pushEntry(entry subIntervalEntry[T]) {
i.subs = append(i.subs, entry)
}
-func (i *MultiInterval[T]) run(timer *time.Timer, key T) {
+func (i *MultiInterval[T]) run(timer clockwork.Timer, key T) {
defer timer.Stop()
var pending pendingTicks[T]
@@ -276,7 +284,7 @@ func (i *MultiInterval[T]) run(timer *time.Timer, key T) {
}
select {
- case t := <-timer.C:
+ case t := <-timer.Chan():
// increment the sub-interval for the current key
i.increment(key)
@@ -292,7 +300,7 @@ func (i *MultiInterval[T]) run(timer *time.Timer, key T) {
timer.Reset(d)
case resetKey := <-i.reset:
- now := time.Now()
+ now := i.clock.Now()
// reset the sub-interval for the target key
i.resetEntry(now, resetKey)
@@ -307,14 +315,14 @@ func (i *MultiInterval[T]) run(timer *time.Timer, key T) {
// stop and drain timer
if !timer.Stop() {
- <-timer.C
+ <-timer.Chan()
}
// apply the new duration
timer.Reset(d)
case fireKey := <-i.fire:
- now := time.Now()
+ now := i.clock.Now()
// reset the sub-interval for the key we are firing
i.resetEntry(now, fireKey)
@@ -329,13 +337,13 @@ func (i *MultiInterval[T]) run(timer *time.Timer, key T) {
// stop and drain timer.
if !timer.Stop() {
- <-timer.C
+ <-timer.Chan()
}
// re-set the timer
timer.Reset(d)
case entry := <-i.push:
- now := time.Now()
+ now := i.clock.Now()
// add the new sub-interval entry
i.pushEntry(entry)
@@ -351,7 +359,7 @@ func (i *MultiInterval[T]) run(timer *time.Timer, key T) {
// stop and drain timer
if !timer.Stop() {
- <-timer.C
+ <-timer.Chan()
}
// apply the new duration
diff --git a/lib/utils/interval/multi_test.go b/lib/utils/interval/multi_test.go
index 37cf3a10b1614..3ef8b1f17ad56 100644
--- a/lib/utils/interval/multi_test.go
+++ b/lib/utils/interval/multi_test.go
@@ -24,6 +24,7 @@ import (
"testing"
"time"
+ "github.com/jonboulle/clockwork"
"github.com/stretchr/testify/require"
)
@@ -47,10 +48,12 @@ func TestMultiIntervalReset(t *testing.T) {
resetTimer := time.NewTimer(duration / 3)
defer resetTimer.Stop()
- interval := NewMulti[string](SubInterval[string]{
- Key: "key",
- Duration: duration,
- })
+ interval := NewMulti[string](
+ clockwork.NewRealClock(),
+ SubInterval[string]{
+ Key: "key",
+ Duration: duration,
+ })
defer interval.Stop()
start := time.Now()
@@ -92,6 +95,7 @@ func TestMultiIntervalReset(t *testing.T) {
func TestMultiIntervalBasics(t *testing.T) {
t.Parallel()
interval := NewMulti[string](
+ clockwork.NewRealClock(),
SubInterval[string]{
Key: "fast",
Duration: time.Millisecond * 8,
@@ -151,6 +155,7 @@ func TestMultiIntervalVariableDuration(t *testing.T) {
bar.counter.Store(1)
interval := NewMulti[string](
+ clockwork.NewRealClock(),
SubInterval[string]{
Key: "foo",
VariableDuration: foo,
@@ -216,6 +221,7 @@ func TestMultiIntervalVariableDuration(t *testing.T) {
func TestMultiIntervalPush(t *testing.T) {
t.Parallel()
interval := NewMulti[string](
+ clockwork.NewRealClock(),
SubInterval[string]{
Key: "foo",
Duration: time.Millisecond * 6,
@@ -289,6 +295,7 @@ func TestMultiIntervalFireNow(t *testing.T) {
// set up one sub-interval that fires frequently, and another that will never
// fire during this test unless we trigger with FireNow.
interval := NewMulti[string](
+ clockwork.NewRealClock(),
SubInterval[string]{
Key: "slow",
Duration: time.Hour,
diff --git a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx b/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx
index f16b938ba7534..d3dba75b8e1f9 100644
--- a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx
+++ b/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx
@@ -359,14 +359,6 @@ export function generateCmd(data: GenerateCmdProps) {
// AutomaticUpgradesTargetVersion contains a v, eg, v13.4.2.
// However, helm chart expects no 'v', eg, 13.4.2.
deployVersion = data.automaticUpgradesTargetVersion.replace(/^v/, '');
-
- // TODO(marco): remove when stable/cloud moves to v14
- // For v13 releases of the helm chart, we must remove the App role.
- // We get the following error otherwise:
- // Error: INSTALLATION FAILED: execution error at (teleport-kube-agent/templates/statefulset.yaml:26:28): at least one of 'apps' and 'appResources' is required in chart values when app role is enabled, see README
- if (deployVersion.startsWith('13.')) {
- roles = ['Kube'];
- }
}
const yamlRoles = roles.join(',').toLowerCase();