diff --git a/lib/auth/integration/integrationv1/service_test.go b/lib/auth/integration/integrationv1/service_test.go
index a8cfc66e4e45e..10a1e03eff206 100644
--- a/lib/auth/integration/integrationv1/service_test.go
+++ b/lib/auth/integration/integrationv1/service_test.go
@@ -36,6 +36,7 @@ import (
"github.com/gravitational/teleport/lib/authz"
"github.com/gravitational/teleport/lib/backend/memory"
"github.com/gravitational/teleport/lib/events"
+ "github.com/gravitational/teleport/lib/fixtures"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/services/local"
"github.com/gravitational/teleport/lib/tlsca"
@@ -376,15 +377,17 @@ func TestIntegrationCRUD(t *testing.T) {
Setup: func(t *testing.T, igName string) {
_, err := localClient.CreateIntegration(ctx, sampleIntegrationFn(t, igName))
require.NoError(t, err)
- require.NoError(t, localClient.CreatePlugin(ctx, newPlugin(t, igName)))
+ // other existing plugin should not affect identity center plugin referenced integration.
+ require.NoError(t, localClient.CreatePlugin(ctx, fixtures.NewMattermostPlugin(t)))
+ require.NoError(t, localClient.CreatePlugin(ctx, fixtures.NewIdentityCenterPlugin(t, igName, igName)))
},
Test: func(ctx context.Context, resourceSvc *Service, igName string) error {
_, err := resourceSvc.DeleteIntegration(ctx, &integrationpb.DeleteIntegrationRequest{Name: igName})
return err
},
Cleanup: func(t *testing.T, igName string) {
- err := localClient.DeletePlugin(ctx, newPlugin(t, igName).GetName())
- require.NoError(t, err)
+ require.NoError(t, localClient.DeletePlugin(ctx, types.PluginTypeMattermost))
+ require.NoError(t, localClient.DeletePlugin(ctx, types.PluginTypeAWSIdentityCenter))
},
ErrAssertion: trace.IsBadParameter,
},
@@ -721,31 +724,6 @@ func newCertAuthority(t *testing.T, caType types.CertAuthType, domain string) ty
return ca
}
-func newPlugin(t *testing.T, integrationName string) *types.PluginV1 {
- t.Helper()
- return &types.PluginV1{
- Metadata: types.Metadata{
- Name: types.PluginTypeAWSIdentityCenter,
- Labels: map[string]string{
- types.HostedPluginLabel: "true",
- },
- },
- Spec: types.PluginSpecV1{
- Settings: &types.PluginSpecV1_AwsIc{
- AwsIc: &types.PluginAWSICSettings{
- IntegrationName: integrationName,
- Region: "test-region",
- Arn: "test-arn",
- AccessListDefaultOwners: []string{"user1", "user2"},
- ProvisioningSpec: &types.AWSICProvisioningSpec{
- BaseUrl: "https://example.com",
- },
- },
- },
- },
- }
-}
-
func newGitHubIntegration(name, id, secret string) (*types.IntegrationV1, error) {
ig, err := types.NewIntegrationGitHub(
types.Metadata{
diff --git a/lib/autoupdate/rollout/strategy_haltonerror.go b/lib/autoupdate/rollout/strategy_haltonerror.go
new file mode 100644
index 0000000000000..c93438aaa1941
--- /dev/null
+++ b/lib/autoupdate/rollout/strategy_haltonerror.go
@@ -0,0 +1,155 @@
+/*
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package rollout
+
+import (
+ "context"
+ "log/slog"
+ "time"
+
+ "github.com/gravitational/trace"
+ "github.com/jonboulle/clockwork"
+
+ "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1"
+ update "github.com/gravitational/teleport/api/types/autoupdate"
+)
+
+const (
+ updateReasonCanStart = "can_start"
+ updateReasonCannotStart = "cannot_start"
+ updateReasonPreviousGroupsNotDone = "previous_groups_not_done"
+ updateReasonUpdateComplete = "update_complete"
+ updateReasonUpdateInProgress = "update_in_progress"
+)
+
+type haltOnErrorStrategy struct {
+ log *slog.Logger
+ clock clockwork.Clock
+}
+
+func (h *haltOnErrorStrategy) name() string {
+ return update.AgentsStrategyHaltOnError
+}
+
+func newHaltOnErrorStrategy(log *slog.Logger, clock clockwork.Clock) (rolloutStrategy, error) {
+ if log == nil {
+ return nil, trace.BadParameter("missing log")
+ }
+ if clock == nil {
+ return nil, trace.BadParameter("missing clock")
+ }
+ return &haltOnErrorStrategy{
+ log: log.With("strategy", update.AgentsStrategyHaltOnError),
+ clock: clock,
+ }, nil
+}
+
+func (h *haltOnErrorStrategy) progressRollout(ctx context.Context, groups []*autoupdate.AutoUpdateAgentRolloutStatusGroup) error {
+ now := h.clock.Now()
+ // We process every group in order, all the previous groups must be in the DONE state
+ // for the next group to become active. Even if some early groups are not DONE,
+ // later groups might be ACTIVE and need to transition to DONE, so we cannot
+ // return early and must process every group.
+ //
+ // For example, in a dev/staging/prod setup, the "dev" group might get rolled
+ // back while "staging" is still ACTIVE. We must not start PROD but still need
+ // to transition "staging" to DONE.
+ previousGroupsAreDone := true
+
+ for i, group := range groups {
+ switch group.State {
+ case autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED:
+ var previousGroup *autoupdate.AutoUpdateAgentRolloutStatusGroup
+ if i != 0 {
+ previousGroup = groups[i-1]
+ }
+ canStart, err := canStartHaltOnError(group, previousGroup, now)
+ if err != nil {
+ // In halt-on-error rollouts, groups are dependent.
+ // Failing to transition a group should prevent other groups from transitioning.
+ setGroupState(group, group.State, updateReasonReconcilerError, now)
+ return err
+ }
+ switch {
+ case previousGroupsAreDone && canStart:
+ // We can start
+ setGroupState(group, autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE, updateReasonCanStart, now)
+ case previousGroupsAreDone:
+ // All previous groups are OK, but time-related criteria are not OK
+ setGroupState(group, group.State, updateReasonCannotStart, now)
+ default:
+ // At least one previous group is not DONE
+ setGroupState(group, group.State, updateReasonPreviousGroupsNotDone, now)
+ }
+ previousGroupsAreDone = false
+ case autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK:
+ // The group has been manually rolled back. We don't touch anything, and
+ // we must not let subsequent groups start (the loop still visits them).
+ previousGroupsAreDone = false
+ case autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE:
+ // The group has already been updated, we can look at the next group
+ case autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE:
+ // The group is currently being updated. We check if we can transition it to the done state
+ done, reason := isDoneHaltOnError(group, now)
+
+ if done {
+ // We transition to the done state. We continue processing the groups as we might be able to start the next one.
+ setGroupState(group, autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE, reason, now)
+ } else {
+ setGroupState(group, autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE, reason, now)
+ }
+ previousGroupsAreDone = false
+
+ default:
+ return trace.BadParameter("unknown autoupdate group state: %v", group.State)
+ }
+ }
+ return nil
+}
+
+func canStartHaltOnError(group, previousGroup *autoupdate.AutoUpdateAgentRolloutStatusGroup, now time.Time) (bool, error) {
+ // check wait hours
+ if group.ConfigWaitHours != 0 {
+ if previousGroup == nil {
+ return false, trace.BadParameter("the first group cannot have non-zero wait hours")
+ }
+
+ previousStart := previousGroup.StartTime.AsTime()
+ if previousStart.IsZero() || previousStart.Unix() == 0 {
+ return false, trace.BadParameter("the previous group doesn't have a start time, cannot check the 'wait_hours' criteria")
+ }
+
+ // Check if the wait_hours criteria is OK, if we are at least after 'wait_hours' hours since the previous start.
+ if now.Before(previousGroup.StartTime.AsTime().Add(time.Duration(group.ConfigWaitHours) * time.Hour)) {
+ return false, nil
+ }
+ }
+
+ return inWindow(group, now)
+}
+
+func isDoneHaltOnError(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now time.Time) (bool, string) {
+ // Currently we don't implement status reporting from groups/agents.
+ // So we just wait 60 minutes and consider the maintenance done.
+ // This will change as we introduce agent status report and aggregated agent counts.
+ if group.StartTime.AsTime().Add(time.Hour).Before(now) {
+ return true, updateReasonUpdateComplete
+ }
+ return false, updateReasonUpdateInProgress
+}
diff --git a/lib/autoupdate/rollout/strategy_haltonerror_test.go b/lib/autoupdate/rollout/strategy_haltonerror_test.go
new file mode 100644
index 0000000000000..71a653c760361
--- /dev/null
+++ b/lib/autoupdate/rollout/strategy_haltonerror_test.go
@@ -0,0 +1,482 @@
+/*
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package rollout
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1"
+ "github.com/gravitational/teleport/lib/utils"
+)
+
+func Test_canStartHaltOnError(t *testing.T) {
+ now := testSunday
+ yesterday := testSaturday
+
+ tests := []struct {
+ name string
+ group *autoupdate.AutoUpdateAgentRolloutStatusGroup
+ previousGroup *autoupdate.AutoUpdateAgentRolloutStatusGroup
+ want bool
+ wantErr require.ErrorAssertionFunc
+ }{
+ {
+ name: "first group, no wait_hours",
+ group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "test-group",
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 0,
+ },
+ want: true,
+ wantErr: require.NoError,
+ },
+ {
+ name: "first group, wait_hours (invalid)",
+ group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "test-group",
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 1,
+ },
+ want: false,
+ wantErr: require.Error,
+ },
+ {
+ name: "second group, no wait_hours",
+ group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "test-group",
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 0,
+ },
+ previousGroup: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "previous-group",
+ StartTime: timestamppb.New(now),
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 0,
+ },
+ want: true,
+ wantErr: require.NoError,
+ },
+ {
+ name: "second group, wait_hours not over",
+ group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "test-group",
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 48,
+ },
+ previousGroup: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "previous-group",
+ StartTime: timestamppb.New(yesterday),
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 0,
+ },
+ want: false,
+ wantErr: require.NoError,
+ },
+ {
+ name: "second group, wait_hours over",
+ group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "test-group",
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 24,
+ },
+ previousGroup: &autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ Name: "previous-group",
+ StartTime: timestamppb.New(yesterday),
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ ConfigDays: everyWeekday,
+ ConfigStartHour: int32(now.Hour()),
+ ConfigWaitHours: 0,
+ },
+ want: true,
+ wantErr: require.NoError,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := canStartHaltOnError(tt.group, tt.previousGroup, now)
+ tt.wantErr(t, err)
+ require.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func Test_progressGroupsHaltOnError(t *testing.T) {
+ clock := clockwork.NewFakeClockAt(testSunday)
+ log := utils.NewSlogLoggerForTests()
+ strategy, err := newHaltOnErrorStrategy(log, clock)
+ require.NoError(t, err)
+
+ fewMinutesAgo := clock.Now().Add(-5 * time.Minute)
+ yesterday := testSaturday
+ canStartToday := everyWeekday
+ cannotStartToday := everyWeekdayButSunday
+ ctx := context.Background()
+
+ group1Name := "group1"
+ group2Name := "group2"
+ group3Name := "group3"
+
+ tests := []struct {
+ name string
+ initialState []*autoupdate.AutoUpdateAgentRolloutStatusGroup
+ expectedState []*autoupdate.AutoUpdateAgentRolloutStatusGroup
+ }{
+ {
+ name: "single group unstarted -> unstarted",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCreated,
+ ConfigDays: cannotStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonCannotStart,
+ ConfigDays: cannotStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ },
+ {
+ name: "single group unstarted -> active",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCreated,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE,
+ StartTime: timestamppb.New(clock.Now()),
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonCanStart,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ },
+ {
+ name: "single group active -> active",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE,
+ StartTime: timestamppb.New(fewMinutesAgo),
+ LastUpdateTime: timestamppb.New(fewMinutesAgo),
+ LastUpdateReason: updateReasonCanStart,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE,
+ StartTime: timestamppb.New(fewMinutesAgo),
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonUpdateInProgress,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ },
+ {
+ name: "single group active -> done",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonUpdateInProgress,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonUpdateComplete,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ },
+ {
+ name: "single group done -> done",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonUpdateComplete,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonUpdateComplete,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ },
+ {
+ name: "single group rolledback -> rolledback",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: "manual_rollback",
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: "manual_rollback",
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ },
+ },
+ {
+ name: "first group done, second should activate, third should not progress",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonUpdateComplete,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ {
+ Name: group2Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCreated,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 24,
+ },
+ {
+ Name: group3Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCreated,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 0,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonUpdateComplete,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ {
+ Name: group2Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE,
+ StartTime: timestamppb.New(clock.Now()),
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonCanStart,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 24,
+ },
+ {
+ Name: group3Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonPreviousGroupsNotDone,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 0,
+ },
+ },
+ },
+ {
+ name: "first group rolledback, second should not start",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: "manual_rollback",
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ {
+ Name: group2Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCreated,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 24,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: "manual_rollback",
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ {
+ Name: group2Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonPreviousGroupsNotDone,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 24,
+ },
+ },
+ },
+ {
+ name: "first group rolledback, second is active and should become done, third should not progress",
+ initialState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: "manual_rollback",
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ {
+ Name: group2Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCanStart,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 0,
+ },
+ {
+ Name: group3Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: updateReasonCreated,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 0,
+ },
+ },
+ expectedState: []*autoupdate.AutoUpdateAgentRolloutStatusGroup{
+ {
+ Name: group1Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ROLLEDBACK,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(yesterday),
+ LastUpdateReason: "manual_rollback",
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ },
+ {
+ Name: group2Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_DONE,
+ StartTime: timestamppb.New(yesterday),
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonUpdateComplete,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 0,
+ },
+ {
+ Name: group3Name,
+ State: autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_UNSTARTED,
+ LastUpdateTime: timestamppb.New(clock.Now()),
+ LastUpdateReason: updateReasonPreviousGroupsNotDone,
+ ConfigDays: canStartToday,
+ ConfigStartHour: matchingStartHour,
+ ConfigWaitHours: 0,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := strategy.progressRollout(ctx, tt.initialState)
+ require.NoError(t, err)
+ // We use require.Equal instead of Elements match because group order matters.
+ // It's not super important for time-based, but is crucial for halt-on-error.
+ // So it's better to be more conservative and validate order never changes for
+ // both strategies.
+ require.Equal(t, tt.expectedState, tt.initialState)
+ })
+ }
+}
diff --git a/lib/autoupdate/rollout/strategy_test.go b/lib/autoupdate/rollout/strategy_test.go
index 2f5a7087ce40f..fbb7ec768d644 100644
--- a/lib/autoupdate/rollout/strategy_test.go
+++ b/lib/autoupdate/rollout/strategy_test.go
@@ -30,9 +30,8 @@ import (
)
var (
- // TODO(hugoShaka) uncomment in the next PRs when this value will become useful
// 2024-11-30 is a Saturday
- // testSaturday = time.Date(2024, 11, 30, 15, 30, 0, 0, time.UTC)
+ testSaturday = time.Date(2024, 11, 30, 12, 30, 0, 0, time.UTC)
// 2024-12-01 is a Sunday
testSunday = time.Date(2024, 12, 1, 12, 30, 0, 0, time.UTC)
matchingStartHour = int32(12)
diff --git a/lib/cache/cache.go b/lib/cache/cache.go
index b29d9cbd07054..0c0f05febe720 100644
--- a/lib/cache/cache.go
+++ b/lib/cache/cache.go
@@ -21,6 +21,7 @@ package cache
import (
"context"
"fmt"
+ "log/slog"
"sync"
"sync/atomic"
"time"
@@ -28,7 +29,6 @@ import (
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/prometheus/client_golang/prometheus"
- log "github.com/sirupsen/logrus"
"go.opentelemetry.io/otel/attribute"
oteltrace "go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
@@ -466,8 +466,8 @@ type SetupConfigFn func(c Config) Config
type Cache struct {
Config
- // Entry is a logging entry
- Logger *log.Entry
+ // Logger emits log messages.
+ Logger *slog.Logger
// rw is used to prevent reads of invalid cache states. From a
// memory-safety perspective, this RWMutex is used to protect
@@ -1106,9 +1106,10 @@ func New(config Config) (*Cache, error) {
pluginStaticCredentialsCache: pluginStaticCredentialsCache,
gitServersCache: gitServersCache,
workloadIdentityCache: workloadIdentityCache,
- Logger: log.WithFields(log.Fields{
- teleport.ComponentKey: config.Component,
- }),
+ Logger: slog.With(
+ teleport.ComponentKey, config.Component,
+ "target", config.target,
+ ),
}
collections, err := setupCollections(cs, config.Watches)
if err != nil {
@@ -1148,15 +1149,15 @@ func (c *Cache) Start() error {
select {
case <-c.initC:
if c.initErr == nil {
- c.Logger.Infof("Cache %q first init succeeded.", c.Config.target)
+ c.Logger.InfoContext(c.ctx, "Cache first init succeeded")
} else {
- c.Logger.WithError(c.initErr).Warnf("Cache %q first init failed, continuing re-init attempts in background.", c.Config.target)
+ c.Logger.WarnContext(c.ctx, "Cache first init failed, continuing re-init attempts in background", "error", c.initErr)
}
case <-c.ctx.Done():
c.Close()
return trace.Wrap(c.ctx.Err(), "context closed during cache init")
case <-time.After(c.Config.CacheInitTimeout):
- c.Logger.Warn("Cache init is taking too long, will continue in background.")
+ c.Logger.WarnContext(c.ctx, "Cache init is taking too long, will continue in background")
}
return nil
}
@@ -1259,7 +1260,7 @@ Outer:
func (c *Cache) update(ctx context.Context, retry retryutils.Retry) {
defer func() {
- c.Logger.Debug("Cache is closing, returning from update loop.")
+ c.Logger.DebugContext(ctx, "Cache is closing, returning from update loop")
// ensure that close operations have been run
c.Close()
}()
@@ -1271,11 +1272,11 @@ func (c *Cache) update(ctx context.Context, retry retryutils.Retry) {
return
}
if err != nil {
- c.Logger.Warnf("Re-init the cache on error: %v", err)
+ c.Logger.WarnContext(ctx, "Re-init the cache on error", "error", err)
}
// events cache should be closed as well
- c.Logger.Debug("Reloading cache.")
+ c.Logger.DebugContext(ctx, "Reloading cache")
c.notify(ctx, Event{Type: Reloading, Event: types.Event{
Resource: &types.ResourceHeader{
@@ -1286,7 +1287,7 @@ func (c *Cache) update(ctx context.Context, retry retryutils.Retry) {
startedWaiting := c.Clock.Now()
select {
case t := <-retry.After():
- c.Logger.Debugf("Initiating new watch after waiting %v.", t.Sub(startedWaiting))
+ c.Logger.DebugContext(ctx, "Initiating new watch after backoff", "backoff_time", t.Sub(startedWaiting))
retry.Inc()
case <-c.ctx.Done():
return
@@ -1415,7 +1416,9 @@ func (c *Cache) fetchAndWatch(ctx context.Context, retry retryutils.Retry, timer
rejectedKinds = append(rejectedKinds, key.String())
}
}
- c.Logger.WithField("rejected", rejectedKinds).Warn("Some resource kinds unsupported by the server cannot be cached")
+ c.Logger.WarnContext(ctx, "Some resource kinds unsupported by the server cannot be cached",
+ "rejected", rejectedKinds,
+ )
}
apply, err := c.fetch(ctx, confirmedKindsMap)
@@ -1468,15 +1471,15 @@ func (c *Cache) fetchAndWatch(ctx context.Context, retry retryutils.Retry, timer
fetchAndApplyDuration := time.Since(fetchAndApplyStart)
if fetchAndApplyDuration > time.Second*20 {
- c.Logger.WithFields(log.Fields{
- "cache_target": c.Config.target,
- "duration": fetchAndApplyDuration.String(),
- }).Warn("slow fetch and apply")
+ c.Logger.WarnContext(ctx, "slow fetch and apply",
+ "cache_target", c.Config.target,
+ "duration", fetchAndApplyDuration.String(),
+ )
} else {
- c.Logger.WithFields(log.Fields{
- "cache_target": c.Config.target,
- "duration": fetchAndApplyDuration.String(),
- }).Debug("fetch and apply")
+ c.Logger.DebugContext(ctx, "fetch and apply",
+ "cache_target", c.Config.target,
+ "duration", fetchAndApplyDuration.String(),
+ )
}
var lastStalenessWarning time.Time
@@ -1519,7 +1522,10 @@ func (c *Cache) fetchAndWatch(ctx context.Context, retry retryutils.Retry, timer
if sk := event.Resource.GetSubKind(); sk != "" {
kind = fmt.Sprintf("%s/%s", kind, sk)
}
- c.Logger.WithField("last_kind", kind).Warnf("Encountered %d stale event(s), may indicate degraded backend or event system performance.", staleEventCount)
+ c.Logger.WarnContext(ctx, "Encountered stale event(s), may indicate degraded backend or event system performance",
+ "stale_event_count", staleEventCount,
+ "last_kind", kind,
+ )
lastStalenessWarning = now
staleEventCount = 0
}
@@ -1638,7 +1644,10 @@ func (c *Cache) performRelativeNodeExpiry(ctx context.Context) error {
}
if removed > 0 {
- c.Logger.Debugf("Removed %d nodes via relative expiry (retentionThreshold=%s).", removed, retentionThreshold)
+ c.Logger.DebugContext(ctx, "Removed nodes via relative expiry",
+ "removed_node_count", removed,
+ "retention_threshold", retentionThreshold,
+ )
}
return nil
@@ -1779,7 +1788,10 @@ func (c *Cache) processEvent(ctx context.Context, event types.Event) error {
resourceKind := resourceKindFromResource(event.Resource)
collection, ok := c.collections.byKind[resourceKind]
if !ok {
- c.Logger.Warnf("Skipping unsupported event %v/%v", event.Resource.GetKind(), event.Resource.GetSubKind())
+ c.Logger.WarnContext(ctx, "Skipping unsupported event",
+ "event_kind", event.Resource.GetKind(),
+ "event_sub_kind", event.Resource.GetSubKind(),
+ )
return nil
}
if err := collection.processEvent(ctx, event); err != nil {
@@ -2595,7 +2607,10 @@ func (c *Cache) GetAppSession(ctx context.Context, req types.GetAppSessionReques
// fallback is sane because method is never used
// in construction of derivative caches.
if sess, err := c.Config.AppSession.GetAppSession(ctx, req); err == nil {
- c.Logger.Debugf("Cache was forced to load session %v/%v from upstream.", sess.GetSubKind(), sess.GetName())
+ c.Logger.DebugContext(ctx, "Cache was forced to load session from upstream",
+ "session_kind", sess.GetSubKind(),
+ "session", sess.GetName(),
+ )
return sess, nil
}
}
@@ -2634,7 +2649,10 @@ func (c *Cache) GetSnowflakeSession(ctx context.Context, req types.GetSnowflakeS
// fallback is sane because method is never used
// in construction of derivative caches.
if sess, err := c.Config.SnowflakeSession.GetSnowflakeSession(ctx, req); err == nil {
- c.Logger.Debugf("Cache was forced to load session %v/%v from upstream.", sess.GetSubKind(), sess.GetName())
+ c.Logger.DebugContext(ctx, "Cache was forced to load session from upstream",
+ "session_kind", sess.GetSubKind(),
+ "session", sess.GetName(),
+ )
return sess, nil
}
}
@@ -2660,7 +2678,10 @@ func (c *Cache) GetSAMLIdPSession(ctx context.Context, req types.GetSAMLIdPSessi
// fallback is sane because method is never used
// in construction of derivative caches.
if sess, err := c.Config.SAMLIdPSession.GetSAMLIdPSession(ctx, req); err == nil {
- c.Logger.Debugf("Cache was forced to load session %v/%v from upstream.", sess.GetSubKind(), sess.GetName())
+ c.Logger.DebugContext(ctx, "Cache was forced to load session from upstream",
+ "session_kind", sess.GetSubKind(),
+ "session", sess.GetName(),
+ )
return sess, nil
}
}
@@ -2750,7 +2771,10 @@ func (c *Cache) GetWebSession(ctx context.Context, req types.GetWebSessionReques
// fallback is sane because method is never used
// in construction of derivative caches.
if sess, err := c.Config.WebSession.Get(ctx, req); err == nil {
- c.Logger.Debugf("Cache was forced to load session %v/%v from upstream.", sess.GetSubKind(), sess.GetName())
+ c.Logger.DebugContext(ctx, "Cache was forced to load session from upstream",
+ "session_kind", sess.GetSubKind(),
+ "session", sess.GetName(),
+ )
return sess, nil
}
}
diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go
index e60f0acac0174..6819e8742fb58 100644
--- a/lib/cache/cache_test.go
+++ b/lib/cache/cache_test.go
@@ -21,6 +21,7 @@ package cache
import (
"context"
"fmt"
+ "log/slog"
"os"
"slices"
"strconv"
@@ -33,7 +34,6 @@ import (
"github.com/google/uuid"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
- log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
protobuf "google.golang.org/protobuf/proto"
@@ -179,7 +179,7 @@ func (t *testPack) Close() {
errors = append(errors, t.cache.Close())
}
if err := trace.NewAggregate(errors...); err != nil {
- log.Warningf("Failed to close %v", err)
+ slog.WarnContext(context.Background(), "Failed to close", "error", err)
}
}
diff --git a/lib/cache/collections.go b/lib/cache/collections.go
index f73f83fcddb83..1af1b2d86d302 100644
--- a/lib/cache/collections.go
+++ b/lib/cache/collections.go
@@ -943,7 +943,7 @@ func (remoteClusterExecutor) upsert(ctx context.Context, cache *Cache, resource
err := cache.trustCache.DeleteRemoteCluster(ctx, resource.GetName())
if err != nil {
if !trace.IsNotFound(err) {
- cache.Logger.WithError(err).Warnf("Failed to delete remote cluster %v.", resource.GetName())
+ cache.Logger.WarnContext(ctx, "Failed to delete remote cluster", "cluster", resource.GetName(), "error", err)
return trace.Wrap(err)
}
}
diff --git a/lib/cache/genericcollection.go b/lib/cache/genericcollection.go
index ac0260da844d9..871e9f8ffd9ae 100644
--- a/lib/cache/genericcollection.go
+++ b/lib/cache/genericcollection.go
@@ -81,7 +81,7 @@ func (g *genericCollection[T, R, _]) processEvent(ctx context.Context, event typ
case types.OpDelete:
if err := g.exec.delete(ctx, g.cache, event.Resource); err != nil {
if !trace.IsNotFound(err) {
- g.cache.Logger.WithError(err).Warn("Failed to delete resource.")
+ g.cache.Logger.WarnContext(ctx, "Failed to delete resource", "error", err)
return trace.Wrap(err)
}
}
@@ -107,7 +107,7 @@ func (g *genericCollection[T, R, _]) processEvent(ctx context.Context, event typ
return trace.Wrap(err)
}
default:
- g.cache.Logger.WithField("event", event.Type).Warn("Skipping unsupported event type.")
+ g.cache.Logger.WarnContext(ctx, "Skipping unsupported event type", "event", event.Type)
}
return nil
}
diff --git a/lib/cloud/aws/aws.go b/lib/cloud/aws/aws.go
index 50a27d991cca2..b1923a9bb9c3f 100644
--- a/lib/cloud/aws/aws.go
+++ b/lib/cloud/aws/aws.go
@@ -17,6 +17,8 @@
package aws
import (
+ "context"
+ "log/slog"
"slices"
"strings"
@@ -27,9 +29,9 @@ import (
"github.com/aws/aws-sdk-go/service/rds"
"github.com/aws/aws-sdk-go/service/redshift"
"github.com/coreos/go-semver/semver"
- log "github.com/sirupsen/logrus"
"github.com/gravitational/teleport/lib/services"
+ logutils "github.com/gravitational/teleport/lib/utils/log"
)
// IsResourceAvailable checks if the input status indicates the resource is
@@ -38,7 +40,7 @@ import (
// Note that this function checks some common values but not necessarily covers
// everything. For types that have other known status values, separate
// functions (e.g. IsDBClusterAvailable) can be implemented.
-func IsResourceAvailable(r interface{}, status *string) bool {
+func IsResourceAvailable(r any, status *string) bool {
switch strings.ToLower(aws.StringValue(status)) {
case "available", "modifying", "snapshotting", "active":
return true
@@ -47,7 +49,10 @@ func IsResourceAvailable(r interface{}, status *string) bool {
return false
default:
- log.WithField("aws_resource", r).Warnf("Unknown status type: %q. Assuming the AWS resource %T is available.", aws.StringValue(status), r)
+ slog.WarnContext(context.Background(), "Assuming that AWS resource with an unknown status is available",
+ "status", aws.StringValue(status),
+ "resource", logutils.TypeAttr(r),
+ )
return true
}
}
@@ -89,7 +94,7 @@ func IsRDSInstanceSupported(instance *rds.DBInstance) bool {
// MariaDB follows semver schema: https://mariadb.org/about/
ver, err := semver.NewVersion(aws.StringValue(instance.EngineVersion))
if err != nil {
- log.Errorf("Failed to parse RDS MariaDB version: %s", aws.StringValue(instance.EngineVersion))
+ slog.ErrorContext(context.Background(), "Failed to parse RDS MariaDB version", "version", aws.StringValue(instance.EngineVersion))
return false
}
@@ -152,7 +157,7 @@ func AuroraMySQLVersion(cluster *rds.DBCluster) string {
func IsDocumentDBClusterSupported(cluster *rds.DBCluster) bool {
ver, err := semver.NewVersion(aws.StringValue(cluster.EngineVersion))
if err != nil {
- log.Errorf("Failed to parse DocumentDB engine version: %s", aws.StringValue(cluster.EngineVersion))
+ slog.ErrorContext(context.Background(), "Failed to parse DocumentDB engine version", "version", aws.StringValue(cluster.EngineVersion))
return false
}
@@ -201,9 +206,9 @@ func IsRDSInstanceAvailable(instanceStatus, instanceIdentifier *string) bool {
return false
default:
- log.Warnf("Unknown status type: %q. Assuming RDS instance %q is available.",
- aws.StringValue(instanceStatus),
- aws.StringValue(instanceIdentifier),
+ slog.WarnContext(context.Background(), "Assuming RDS instance with unknown status is available",
+ "status", aws.StringValue(instanceStatus),
+ "instance", aws.StringValue(instanceIdentifier),
)
return true
}
@@ -230,9 +235,9 @@ func IsDBClusterAvailable(clusterStatus, clusterIndetifier *string) bool {
return false
default:
- log.Warnf("Unknown status type: %q. Assuming Aurora cluster %q is available.",
- aws.StringValue(clusterStatus),
- aws.StringValue(clusterIndetifier),
+ slog.WarnContext(context.Background(), "Assuming Aurora cluster with unknown status is available",
+ "status", aws.StringValue(clusterStatus),
+ "cluster", aws.StringValue(clusterIndetifier),
)
return true
}
@@ -264,9 +269,9 @@ func IsRedshiftClusterAvailable(cluster *redshift.Cluster) bool {
return false
default:
- log.Warnf("Unknown status type: %q. Assuming Redshift cluster %q is available.",
- aws.StringValue(cluster.ClusterStatus),
- aws.StringValue(cluster.ClusterIdentifier),
+ slog.WarnContext(context.Background(), "Assuming Redshift cluster with unknown status is available",
+ "status", aws.StringValue(cluster.ClusterStatus),
+ "cluster", aws.StringValue(cluster.ClusterIdentifier),
)
return true
}
diff --git a/lib/cloud/aws/policy.go b/lib/cloud/aws/policy.go
index 0208dd8ae6bba..e69810af685df 100644
--- a/lib/cloud/aws/policy.go
+++ b/lib/cloud/aws/policy.go
@@ -21,6 +21,7 @@ package aws
import (
"context"
"encoding/json"
+ "log/slog"
"net/url"
"slices"
"sort"
@@ -29,7 +30,6 @@ import (
"github.com/aws/aws-sdk-go-v2/service/iam"
iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types"
"github.com/gravitational/trace"
- log "github.com/sirupsen/logrus"
awsutils "github.com/gravitational/teleport/lib/utils/aws"
)
@@ -517,7 +517,10 @@ func (p *policies) Upsert(ctx context.Context, policy *Policy) (string, error) {
return "", trace.Wrap(err)
}
- log.Debugf("Created new policy %q with ARN %q", policy.Name, policyARN)
+ slog.DebugContext(ctx, "Created new policy",
+ "policy_name", policy.Name,
+ "policy_arn", policyARN,
+ )
return policyARN, nil
}
@@ -543,7 +546,10 @@ func (p *policies) Upsert(ctx context.Context, policy *Policy) (string, error) {
return "", trace.Wrap(err)
}
- log.Debugf("Max policy versions reached for policy %q, deleted policy version %q", policyARN, policyVersionID)
+ slog.DebugContext(ctx, "Max policy versions reached for policy, deleted non-default policy version",
+ "policy_arn", policyARN,
+ "policy_version", policyVersionID,
+ )
}
// Create new policy version.
@@ -552,7 +558,10 @@ func (p *policies) Upsert(ctx context.Context, policy *Policy) (string, error) {
return "", trace.Wrap(err)
}
- log.Debugf("Created new policy version %q for %q", versionID, policyARN)
+ slog.DebugContext(ctx, "Created new policy version",
+ "policy_version", versionID,
+ "policy_arn", policyARN,
+ )
return policyARN, nil
}
diff --git a/lib/cloud/aws/tags_helpers.go b/lib/cloud/aws/tags_helpers.go
index a6f9f26fdc1ab..27dbe8238f178 100644
--- a/lib/cloud/aws/tags_helpers.go
+++ b/lib/cloud/aws/tags_helpers.go
@@ -19,6 +19,8 @@
package aws
import (
+ "context"
+ "log/slog"
"slices"
ec2TypesV2 "github.com/aws/aws-sdk-go-v2/service/ec2/types"
@@ -32,7 +34,6 @@ import (
"github.com/aws/aws-sdk-go/service/redshift"
"github.com/aws/aws-sdk-go/service/redshiftserverless"
"github.com/aws/aws-sdk-go/service/secretsmanager"
- "github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"github.com/gravitational/teleport/api/types"
@@ -67,7 +68,7 @@ func TagsToLabels[Tag ResourceTag](tags []Tag) map[string]string {
if types.IsValidLabelKey(key) {
labels[key] = value
} else {
- logrus.Debugf("Skipping AWS resource tag %q, not a valid label key.", key)
+ slog.DebugContext(context.Background(), "Skipping AWS resource tag with invalid label key", "key", key)
}
}
return labels
diff --git a/lib/cloud/azure/db_server.go b/lib/cloud/azure/db_server.go
index 3efd2a65a7339..855274dfcd096 100644
--- a/lib/cloud/azure/db_server.go
+++ b/lib/cloud/azure/db_server.go
@@ -19,9 +19,11 @@
package azure
import (
+ "context"
+ "log/slog"
+
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysql"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresql"
- log "github.com/sirupsen/logrus"
"github.com/gravitational/teleport/lib/defaults"
)
@@ -120,9 +122,9 @@ func (s *DBServer) IsAvailable() bool {
case "Inaccessible", "Dropping", "Disabled":
return false
default:
- log.Warnf("Unknown server state: %q. Assuming Azure DB server %q is available.",
- s.Properties.UserVisibleState,
- s.Name,
+ slog.WarnContext(context.Background(), "Assuming Azure DB server with unknown server state is available",
+ "state", s.Properties.UserVisibleState,
+ "server", s.Name,
)
return true
}
diff --git a/lib/cloud/azure/kubernetes.go b/lib/cloud/azure/kubernetes.go
index 1f2e0009daa59..ff78307b102f1 100644
--- a/lib/cloud/azure/kubernetes.go
+++ b/lib/cloud/azure/kubernetes.go
@@ -21,6 +21,7 @@ package azure
import (
"context"
"fmt"
+ "log/slog"
"strings"
"time"
@@ -33,7 +34,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v6"
"github.com/golang-jwt/jwt/v4"
"github.com/gravitational/trace"
- "github.com/sirupsen/logrus"
v1 "k8s.io/api/rbac/v1"
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -195,7 +195,10 @@ func (c *aksClient) ListAll(ctx context.Context) ([]*AKSCluster, error) {
for _, s := range page.Value {
cluster, err := AKSClusterFromManagedCluster(s)
if err != nil {
- logrus.WithError(err).Debugf("Failed to convert discovered AKS cluster %q to Teleport internal representation.", StringVal(s.Name))
+ slog.DebugContext(ctx, "Failed to convert discovered AKS cluster to Teleport internal representation",
+ "cluster", StringVal(s.Name),
+ "error", err,
+ )
continue
}
servers = append(servers, cluster)
@@ -217,7 +220,10 @@ func (c *aksClient) ListWithinGroup(ctx context.Context, group string) ([]*AKSCl
for _, s := range page.Value {
cluster, err := AKSClusterFromManagedCluster(s)
if err != nil {
- logrus.WithError(err).Debugf("Failed to convert discovered AKS cluster %q to Teleport internal representation.", StringVal(s.Name))
+ slog.DebugContext(ctx, "Failed to convert discovered AKS cluster to Teleport internal representation",
+ "cluster", StringVal(s.Name),
+ "error", err,
+ )
continue
}
servers = append(servers, cluster)
diff --git a/lib/cloud/azure/mysql.go b/lib/cloud/azure/mysql.go
index 605e255e0a150..5fbd5e3b68042 100644
--- a/lib/cloud/azure/mysql.go
+++ b/lib/cloud/azure/mysql.go
@@ -20,10 +20,10 @@ package azure
import (
"context"
+ "log/slog"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysql"
"github.com/gravitational/trace"
- log "github.com/sirupsen/logrus"
)
var _ DBServersClient = (*mySQLClient)(nil)
@@ -87,9 +87,9 @@ func isMySQLVersionSupported(s *DBServer) bool {
case armmysql.ServerVersionFive6:
return false
default:
- log.Warnf("Unknown server version: %q. Assuming Azure DB server %q is supported.",
- s.Properties.Version,
- s.Name,
+ slog.WarnContext(context.Background(), "Assuming Azure DB server with unknown server version is supported",
+ "version", s.Properties.Version,
+ "server", s.Name,
)
return true
}
diff --git a/lib/cloud/azure/mysql_flex.go b/lib/cloud/azure/mysql_flex.go
index acc7ef6c99ad8..f2d3b00ca860f 100644
--- a/lib/cloud/azure/mysql_flex.go
+++ b/lib/cloud/azure/mysql_flex.go
@@ -26,7 +26,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysqlflexibleservers"
"github.com/gravitational/trace"
- "github.com/sirupsen/logrus"
)
// armMySQLFlexServersClient is an interface that defines a subset of functions of armmysqlflexibleservers.ServersClient.
@@ -44,7 +43,6 @@ var _ MySQLFlexServersClient = (*mySQLFlexServersClient)(nil)
// NewMySQLFlexServersClient creates a new Azure MySQL Flexible server client by subscription and credentials.
func NewMySQLFlexServersClient(subID string, cred azcore.TokenCredential, opts *arm.ClientOptions) (MySQLFlexServersClient, error) {
- logrus.Debug("Initializing Azure MySQL Flexible servers client.")
api, err := armmysqlflexibleservers.NewServersClient(subID, cred, opts)
if err != nil {
return nil, trace.Wrap(err)
diff --git a/lib/cloud/azure/postgres_flex.go b/lib/cloud/azure/postgres_flex.go
index 8c894ed1ece10..8b05b3b1c7715 100644
--- a/lib/cloud/azure/postgres_flex.go
+++ b/lib/cloud/azure/postgres_flex.go
@@ -26,7 +26,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers"
"github.com/gravitational/trace"
- "github.com/sirupsen/logrus"
)
type armPostgresFlexServersClient interface {
@@ -44,7 +43,6 @@ var _ PostgresFlexServersClient = (*postgresFlexServersClient)(nil)
// NewPostgresFlexServersClient creates a new Azure PostgreSQL Flexible server client by subscription and credentials.
func NewPostgresFlexServersClient(subID string, cred azcore.TokenCredential, opts *arm.ClientOptions) (PostgresFlexServersClient, error) {
- logrus.Debug("Initializing Azure PostgreSQL Flexible servers client.")
api, err := armpostgresqlflexibleservers.NewServersClient(subID, cred, opts)
if err != nil {
return nil, trace.Wrap(err)
diff --git a/lib/cloud/azure/redis.go b/lib/cloud/azure/redis.go
index 318fe42772331..665be2d4fe7e3 100644
--- a/lib/cloud/azure/redis.go
+++ b/lib/cloud/azure/redis.go
@@ -26,7 +26,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redis/armredis/v3"
"github.com/gravitational/trace"
- "github.com/sirupsen/logrus"
)
// armRedisClient is an interface defines a subset of functions of armredis.Client.
@@ -43,7 +42,6 @@ type redisClient struct {
// NewRedisClient creates a new Azure Redis client by subscription and credentials.
func NewRedisClient(subscription string, cred azcore.TokenCredential, options *arm.ClientOptions) (RedisClient, error) {
- logrus.Debug("Initializing Azure Redis client.")
api, err := armredis.NewClient(subscription, cred, options)
if err != nil {
return nil, trace.Wrap(err)
diff --git a/lib/cloud/azure/redis_enterprise.go b/lib/cloud/azure/redis_enterprise.go
index ec52da777f363..feaf94a5687ba 100644
--- a/lib/cloud/azure/redis_enterprise.go
+++ b/lib/cloud/azure/redis_enterprise.go
@@ -21,13 +21,13 @@ package azure
import (
"context"
"fmt"
+ "log/slog"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redisenterprise/armredisenterprise"
"github.com/gravitational/trace"
- "github.com/sirupsen/logrus"
)
// armRedisEnterpriseDatabaseClient is an interface defines a subset of
@@ -53,7 +53,6 @@ type redisEnterpriseClient struct {
// NewRedisEnterpriseClient creates a new Azure Redis Enterprise client by
// subscription and credentials.
func NewRedisEnterpriseClient(subscription string, cred azcore.TokenCredential, options *arm.ClientOptions) (RedisEnterpriseClient, error) {
- logrus.Debug("Initializing Azure Redis Enterprise client.")
clusterAPI, err := armredisenterprise.NewClient(subscription, cred, options)
if err != nil {
return nil, trace.Wrap(err)
@@ -160,9 +159,14 @@ func (c *redisEnterpriseClient) listDatabasesByClusters(ctx context.Context, clu
databases, err := c.listDatabasesByCluster(ctx, cluster)
if err != nil {
if trace.IsAccessDenied(err) || trace.IsNotFound(err) {
- logrus.Debugf("Failed to listDatabasesByCluster on Redis Enterprise cluster %v: %v.", StringVal(cluster.Name), err.Error())
+ slog.DebugContext(ctx, "Failed to listDatabasesByCluster on Redis Enterprise cluster",
+ "cluster", StringVal(cluster.Name),
+ "error", err)
} else {
- logrus.Warnf("Failed to listDatabasesByCluster on Redis Enterprise cluster %v: %v.", StringVal(cluster.Name), err.Error())
+ slog.WarnContext(ctx, "Failed to listDatabasesByCluster on Redis Enterprise cluster",
+ "cluster", StringVal(cluster.Name),
+ "error", err,
+ )
}
continue
}
diff --git a/lib/cloud/clients.go b/lib/cloud/clients.go
index 93bb27f90b246..54b02d84dc400 100644
--- a/lib/cloud/clients.go
+++ b/lib/cloud/clients.go
@@ -21,6 +21,7 @@ package cloud
import (
"context"
"io"
+ "log/slog"
"sync"
"time"
@@ -63,7 +64,6 @@ import (
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
"github.com/gravitational/trace"
- "github.com/sirupsen/logrus"
"google.golang.org/api/option"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -788,12 +788,17 @@ func (c *cloudClients) getAWSSessionForRegion(ctx context.Context, region string
return nil, trace.BadParameter("missing aws integration session provider")
}
- logrus.Debugf("Initializing AWS session for region %v with integration %q.", region, opts.integration)
+ slog.DebugContext(ctx, "Initializing AWS session",
+ "region", region,
+ "integration", opts.integration,
+ )
session, err := c.awsIntegrationSessionProviderFn(ctx, region, opts.integration)
return session, trace.Wrap(err)
}
- logrus.Debugf("Initializing AWS session for region %v using environment credentials.", region)
+ slog.DebugContext(ctx, "Initializing AWS session using environment credentials",
+ "region", region,
+ )
session, err := awsAmbientSessionProvider(ctx, region)
return session, trace.Wrap(err)
}
@@ -867,7 +872,6 @@ func (c *cloudClients) initGCPIAMClient(ctx context.Context) (*gcpcredentials.Ia
if c.gcpIAM != nil { // If some other thread already got here first.
return c.gcpIAM, nil
}
- logrus.Debug("Initializing GCP IAM client.")
gcpIAM, err := gcpcredentials.NewIamCredentialsClient(ctx)
if err != nil {
return nil, trace.Wrap(err)
@@ -882,7 +886,6 @@ func (c *cloudClients) initAzureCredential() (azcore.TokenCredential, error) {
if c.azureCredential != nil { // If some other thread already got here first.
return c.azureCredential, nil
}
- logrus.Debug("Initializing Azure default credential chain.")
// TODO(gavin): if/when we support AzureChina/AzureGovernment, we will need to specify the cloud in these options
options := &azidentity.DefaultAzureCredentialOptions{}
cred, err := azidentity.NewDefaultAzureCredential(options)
@@ -905,7 +908,6 @@ func (c *cloudClients) initAzureMySQLClient(subscription string) (azure.DBServer
return client, nil
}
- logrus.Debug("Initializing Azure MySQL servers client.")
// TODO(gavin): if/when we support AzureChina/AzureGovernment, we will need to specify the cloud in these options
options := &arm.ClientOptions{}
api, err := armmysql.NewServersClient(subscription, cred, options)
@@ -928,7 +930,6 @@ func (c *cloudClients) initAzurePostgresClient(subscription string) (azure.DBSer
if client, ok := c.azurePostgresClients[subscription]; ok { // If some other thread already got here first.
return client, nil
}
- logrus.Debug("Initializing Azure Postgres servers client.")
// TODO(gavin): if/when we support AzureChina/AzureGovernment, we will need to specify the cloud in these options
options := &arm.ClientOptions{}
api, err := armpostgresql.NewServersClient(subscription, cred, options)
@@ -951,7 +952,6 @@ func (c *cloudClients) initAzureSubscriptionsClient() (*azure.SubscriptionClient
if c.azureSubscriptionsClient != nil { // If some other thread already got here first.
return c.azureSubscriptionsClient, nil
}
- logrus.Debug("Initializing Azure subscriptions client.")
// TODO(gavin): if/when we support AzureChina/AzureGovernment,
// we will need to specify the cloud in these options
opts := &arm.ClientOptions{}
@@ -971,7 +971,6 @@ func (c *cloudClients) initInstanceMetadata(ctx context.Context) (imds.Client, e
if c.instanceMetadata != nil { // If some other thread already got here first.
return c.instanceMetadata, nil
}
- logrus.Debug("Initializing instance metadata client.")
providers := []func(ctx context.Context) (imds.Client, error){
func(ctx context.Context) (imds.Client, error) {
@@ -1012,7 +1011,6 @@ func (c *cloudClients) initAzureKubernetesClient(subscription string) (azure.AKS
if client, ok := c.azureKubernetesClient[subscription]; ok { // If some other thread already got here first.
return client, nil
}
- logrus.Debug("Initializing Azure AKS client.")
// TODO(tigrato): if/when we support AzureChina/AzureGovernment, we will need to specify the cloud in these options
options := &arm.ClientOptions{}
api, err := armcontainerservice.NewManagedClustersClient(subscription, cred, options)
@@ -1332,7 +1330,10 @@ func (c *TestCloudClients) Close() error {
// newSessionWithRole assumes a given AWS IAM Role, passing an external ID if given,
// and returns a new AWS session with the assumed role in the given region.
func newSessionWithRole(ctx context.Context, svc stscreds.AssumeRoler, region, roleARN, externalID string) (*awssession.Session, error) {
- logrus.Debugf("Initializing AWS session for assumed role %q for region %v.", roleARN, region)
+ slog.DebugContext(ctx, "Initializing AWS session for assumed role",
+ "assumed_role", roleARN,
+ "region", region,
+ )
// Make a credentials with AssumeRoleProvider and test it out.
cred := stscreds.NewCredentialsWithClient(svc, roleARN, func(p *stscreds.AssumeRoleProvider) {
if externalID != "" {
diff --git a/lib/cloud/gcp/vm.go b/lib/cloud/gcp/vm.go
index 514d0d3465988..9ebc40b7d88cc 100644
--- a/lib/cloud/gcp/vm.go
+++ b/lib/cloud/gcp/vm.go
@@ -25,6 +25,7 @@ import (
"errors"
"fmt"
"io"
+ "log/slog"
"net"
"slices"
"strings"
@@ -37,7 +38,6 @@ import (
"github.com/googleapis/gax-go/v2/apierror"
"github.com/gravitational/trace"
"github.com/gravitational/trace/trail"
- "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@@ -553,11 +553,11 @@ https://cloud.google.com/solutions/connecting-securely#storing_host_keys_by_enab
var err error
// Fetch the instance first to get the most up-to-date metadata hash.
if keyReq.Instance, err = req.Client.GetInstance(ctx, &req.InstanceRequest); err != nil {
- logrus.WithError(err).Warn("Error fetching instance.")
+ slog.WarnContext(ctx, "Error fetching instance", "error", err)
return
}
if err := req.Client.RemoveSSHKey(ctx, keyReq); err != nil {
- logrus.WithError(err).Warn("Error deleting SSH Key.")
+ slog.WarnContext(ctx, "Error deleting SSH Key", "error", err)
}
}()
@@ -578,8 +578,10 @@ https://cloud.google.com/solutions/connecting-securely#storing_host_keys_by_enab
for _, ip := range ipAddrs {
addr := net.JoinHostPort(ip, req.SSHPort)
stdout, stderr, err := sshutils.RunSSH(ctx, addr, req.Script, config, sshutils.WithDialer(req.dialContext))
- logrus.Debug(string(stdout))
- logrus.Debug(string(stderr))
+ slog.DebugContext(ctx, "Command completed",
+ "stdout", string(stdout),
+ "stderr", string(stderr),
+ )
if err == nil {
return nil
}
@@ -592,6 +594,6 @@ https://cloud.google.com/solutions/connecting-securely#storing_host_keys_by_enab
}
err = trace.NewAggregate(errs...)
- logrus.WithError(err).Debug("Command exited with error.")
+ slog.DebugContext(ctx, "Command exited with error", "error", err)
return err
}
diff --git a/lib/fixtures/plugins.go b/lib/fixtures/plugins.go
new file mode 100644
index 0000000000000..1d3a0f5b8f662
--- /dev/null
+++ b/lib/fixtures/plugins.go
@@ -0,0 +1,85 @@
+/*
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package fixtures
+
+import (
+ "testing"
+
+ "github.com/gravitational/teleport/api/types"
+)
+
+// NewIdentityCenterPlugin returns a new types.PluginV1 with PluginSpecV1_AwsIc settings.
+func NewIdentityCenterPlugin(t *testing.T, serviceProviderName, integrationName string) *types.PluginV1 {
+ t.Helper()
+ return &types.PluginV1{
+ Metadata: types.Metadata{
+ Name: types.PluginTypeAWSIdentityCenter,
+ Labels: map[string]string{
+ types.HostedPluginLabel: "true",
+ },
+ },
+ Spec: types.PluginSpecV1{
+ Settings: &types.PluginSpecV1_AwsIc{
+ AwsIc: &types.PluginAWSICSettings{
+ IntegrationName: integrationName,
+ Region: "test-region",
+ Arn: "test-arn",
+ AccessListDefaultOwners: []string{"user1", "user2"},
+ ProvisioningSpec: &types.AWSICProvisioningSpec{
+ BaseUrl: "https://example.com",
+ },
+ SamlIdpServiceProviderName: serviceProviderName,
+ },
+ },
+ },
+ }
+}
+
+// NewMattermostPlugin returns a new types.PluginV1 with PluginSpecV1_Mattermost settings.
+func NewMattermostPlugin(t *testing.T) *types.PluginV1 {
+ t.Helper()
+ return &types.PluginV1{
+ SubKind: types.PluginSubkindAccess,
+ Metadata: types.Metadata{
+ Labels: map[string]string{
+ "teleport.dev/hosted-plugin": "true",
+ },
+ Name: types.PluginTypeMattermost,
+ },
+ Spec: types.PluginSpecV1{
+ Settings: &types.PluginSpecV1_Mattermost{
+ Mattermost: &types.PluginMattermostSettings{
+ ServerUrl: "https://example.com",
+ Channel: "test_channel",
+ Team: "test_team",
+ ReportToEmail: "test@example.com",
+ },
+ },
+ },
+ Credentials: &types.PluginCredentialsV1{
+ Credentials: &types.PluginCredentialsV1_StaticCredentialsRef{
+ StaticCredentialsRef: &types.PluginStaticCredentialsRef{
+ Labels: map[string]string{
+ "plugin": "mattermost",
+ },
+ },
+ },
+ },
+ }
+}
diff --git a/lib/services/local/integrations.go b/lib/services/local/integrations.go
index 3b9842ee79690..8137b3fbbbb77 100644
--- a/lib/services/local/integrations.go
+++ b/lib/services/local/integrations.go
@@ -212,9 +212,11 @@ func integrationReferencedByAWSICPlugin(ctx context.Context, bk backend.Backend,
if !ok {
continue
}
-
- if pluginV1.GetType() == types.PluginType(types.PluginTypeAWSIdentityCenter) {
- switch pluginV1.Spec.GetAwsIc().IntegrationName {
+ if pluginV1.GetType() != types.PluginType(types.PluginTypeAWSIdentityCenter) {
+ continue
+ }
+ if awsIC := pluginV1.Spec.GetAwsIc(); awsIC != nil {
+ switch awsIC.IntegrationName {
case name:
return nil, trace.BadParameter("cannot delete AWS OIDC integration currently referenced by AWS Identity Center integration %q", pluginV1.GetName())
default:
diff --git a/lib/services/local/saml_idp_service_provider.go b/lib/services/local/saml_idp_service_provider.go
index da99ef05d8ad0..6b08cf084afd9 100644
--- a/lib/services/local/saml_idp_service_provider.go
+++ b/lib/services/local/saml_idp_service_provider.go
@@ -419,9 +419,13 @@ func spReferencedByAWSICPlugin(ctx context.Context, bk backend.Backend, serviceP
if !ok {
continue
}
-
- if pluginV1.Spec.GetAwsIc().SamlIdpServiceProviderName == serviceProviderName {
- return trace.BadParameter("cannot delete SAML service provider currently referenced by AWS Identity Center integration %q", pluginV1.GetName())
+ if pluginV1.GetType() != types.PluginType(types.PluginTypeAWSIdentityCenter) {
+ continue
+ }
+ if awsIC := pluginV1.Spec.GetAwsIc(); awsIC != nil {
+ if awsIC.SamlIdpServiceProviderName == serviceProviderName {
+ return trace.BadParameter("cannot delete SAML service provider currently referenced by AWS Identity Center integration %q", pluginV1.GetName())
+ }
}
}
diff --git a/lib/services/local/saml_idp_service_provider_test.go b/lib/services/local/saml_idp_service_provider_test.go
index aafdedba9a5da..b1161ec577d2b 100644
--- a/lib/services/local/saml_idp_service_provider_test.go
+++ b/lib/services/local/saml_idp_service_provider_test.go
@@ -36,6 +36,7 @@ import (
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/backend/memory"
+ "github.com/gravitational/teleport/lib/fixtures"
"github.com/gravitational/teleport/lib/services"
)
@@ -613,37 +614,13 @@ func TestDeleteSAMLServiceProviderWhenReferencedByPlugin(t *testing.T) {
require.NoError(t, samlService.CreateSAMLIdPServiceProvider(ctx, sp))
// service provider should not be deleted when referenced by the plugin.
- require.NoError(t, pluginService.CreatePlugin(ctx, newPlugin(t, sp.GetName())))
+ require.NoError(t, pluginService.CreatePlugin(ctx, fixtures.NewIdentityCenterPlugin(t, sp.GetName(), sp.GetName())))
err = samlService.DeleteSAMLIdPServiceProvider(ctx, sp.GetName())
require.ErrorContains(t, err, "referenced by AWS Identity Center integration")
// service provider should be deleted once the referenced plugin itself is deleted.
+ // other existing plugin should not prevent SAML service provider from deletion.
+ require.NoError(t, pluginService.CreatePlugin(ctx, fixtures.NewMattermostPlugin(t)))
require.NoError(t, pluginService.DeletePlugin(ctx, types.PluginTypeAWSIdentityCenter))
require.NoError(t, samlService.DeleteSAMLIdPServiceProvider(ctx, sp.GetName()))
}
-
-func newPlugin(t *testing.T, serviceProviderName string) *types.PluginV1 {
- t.Helper()
- return &types.PluginV1{
- Metadata: types.Metadata{
- Name: types.PluginTypeAWSIdentityCenter,
- Labels: map[string]string{
- types.HostedPluginLabel: "true",
- },
- },
- Spec: types.PluginSpecV1{
- Settings: &types.PluginSpecV1_AwsIc{
- AwsIc: &types.PluginAWSICSettings{
- IntegrationName: "test-integration",
- Region: "test-region",
- Arn: "test-arn",
- AccessListDefaultOwners: []string{"user1", "user2"},
- ProvisioningSpec: &types.AWSICProvisioningSpec{
- BaseUrl: "https://example.com",
- },
- SamlIdpServiceProviderName: serviceProviderName,
- },
- },
- },
- }
-}
diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go
index 62ecb813a5b6e..a21d165e382de 100644
--- a/lib/web/apiserver.go
+++ b/lib/web/apiserver.go
@@ -917,8 +917,9 @@ func (h *Handler) bindDefaultEndpoints() {
// MFA private endpoints.
h.GET("/webapi/mfa/devices", h.WithAuth(h.getMFADevicesHandle))
- h.DELETE("/webapi/mfa/devices", h.WithAuth(h.deleteMFADeviceHandle))
h.POST("/webapi/mfa/authenticatechallenge", h.WithAuth(h.createAuthenticateChallengeHandle))
+ // TODO(Joerger) v19.0.0: currently unused, WebUI can use these in v19 without backwards compatibility concerns.
+ h.DELETE("/webapi/mfa/devices", h.WithAuth(h.deleteMFADeviceHandle))
h.POST("/webapi/mfa/registerchallenge", h.WithAuth(h.createRegisterChallengeHandle))
h.POST("/webapi/mfa/devices", h.WithAuth(h.addMFADeviceHandle))
diff --git a/lib/web/mfa.go b/lib/web/mfa.go
index 2ab9bfa281636..485a4eff460bc 100644
--- a/lib/web/mfa.go
+++ b/lib/web/mfa.go
@@ -75,7 +75,7 @@ func (h *Handler) deleteMFADeviceWithTokenHandle(w http.ResponseWriter, r *http.
return OK(), nil
}
-type deleteMfaDeviceRequest struct {
+type deleteMFADeviceRequest struct {
// DeviceName is the name of the device to delete.
DeviceName string `json:"deviceName"`
// ExistingMFAResponse is an MFA challenge response from an existing device.
@@ -85,7 +85,7 @@ type deleteMfaDeviceRequest struct {
// deleteMFADeviceHandle deletes an mfa device for the user defined in the `token`, given as a query parameter.
func (h *Handler) deleteMFADeviceHandle(w http.ResponseWriter, r *http.Request, p httprouter.Params, c *SessionContext) (interface{}, error) {
- var req deleteMfaDeviceRequest
+ var req deleteMFADeviceRequest
if err := httplib.ReadJSON(r, &req); err != nil {
return nil, trace.Wrap(err)
}
diff --git a/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx b/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx
index bbce32df63f98..9633fe814a952 100644
--- a/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx
+++ b/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx
@@ -101,7 +101,7 @@ export function AddAuthDeviceWizard({
// TODO(Joerger): v19.0.0
// A user without devices can register their first device without a privilege token
// too, but the existing web register endpoint requires privilege token.
- // We have a new endpoint "/v1/webapi/users/devices" which does not
+ // We have a new endpoint "/v1/webapi/mfa/registerchallenge" which does not
// require token, but can't be used until v19 for backwards compatibility.
// Once in use, we can leave privilege token empty here.
useEffect(() => {
diff --git a/web/packages/teleport/src/Integrations/IntegrationList.test.tsx b/web/packages/teleport/src/Integrations/IntegrationList.test.tsx
new file mode 100644
index 0000000000000..3012e356a4788
--- /dev/null
+++ b/web/packages/teleport/src/Integrations/IntegrationList.test.tsx
@@ -0,0 +1,64 @@
+/**
+ * Teleport
+ * Copyright (C) 2023 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+import { fireEvent, render, screen, userEvent } from 'design/utils/testing';
+
+import { Router } from 'react-router';
+
+import { createMemoryHistory } from 'history';
+
+import { IntegrationList } from 'teleport/Integrations/IntegrationList';
+import {
+ IntegrationKind,
+ IntegrationStatusCode,
+} from 'teleport/services/integrations';
+
+test('integration list shows edit and view action menu for aws-oidc, row click navigates', async () => {
+ const history = createMemoryHistory();
+ history.push = jest.fn();
+
+  render(
+    <Router history={history}>
+      <IntegrationList
+        list={[
+          {
+            resourceType: 'integration',
+            name: 'aws-integration',
+            kind: IntegrationKind.AwsOidc,
+            statusCode: IntegrationStatusCode.Running,
+          },
+        ]}
+      />
+    </Router>
+  );
+
+ fireEvent.click(screen.getByRole('button', { name: 'Options' }));
+ expect(screen.getByText('View Status')).toBeInTheDocument();
+ expect(screen.getByText('View Status')).toHaveAttribute(
+ 'href',
+ '/web/integrations/status/aws-oidc/aws-integration'
+ );
+ expect(screen.getByText('Edit...')).toBeInTheDocument();
+ expect(screen.getByText('Delete...')).toBeInTheDocument();
+
+ await userEvent.click(screen.getAllByRole('row')[1]);
+ expect(history.push).toHaveBeenCalledWith(
+ '/web/integrations/status/aws-oidc/aws-integration'
+ );
+});
diff --git a/web/packages/teleport/src/Integrations/IntegrationList.tsx b/web/packages/teleport/src/Integrations/IntegrationList.tsx
index 8e3e41526ac77..a7eadd708a7e1 100644
--- a/web/packages/teleport/src/Integrations/IntegrationList.tsx
+++ b/web/packages/teleport/src/Integrations/IntegrationList.tsx
@@ -66,7 +66,7 @@ export function IntegrationList(props: Props) {
const history = useHistory();
function handleRowClick(row: IntegrationLike) {
- if (row.kind !== 'okta') return;
+ if (row.kind !== 'okta' && row.kind !== IntegrationKind.AwsOidc) return;
history.push(cfg.getIntegrationStatusRoute(row.kind, row.name));
}
@@ -154,15 +154,26 @@ export function IntegrationList(props: Props) {
return (
- {/* Currently, only AWSOIDC supports editing. */}
+ {/* Currently, only AWS OIDC supports editing & status dash */}
{item.kind === IntegrationKind.AwsOidc && (
-            <MenuItem onClick={() => props.onEditIntegration(item)}>
-              Edit...
-            </MenuItem>
+            <>
+              <MenuItem
+                as={InternalLink}
+                to={cfg.getIntegrationStatusRoute(item.kind, item.name)}
+              >
+                View Status
+              </MenuItem>
+              <MenuItem onClick={() => props.onEditIntegration(item)}>
+                Edit...
+              </MenuItem>
+            </>
)}
|