From 84e9f2080472432155cb0da865fc0af7c38d1c11 Mon Sep 17 00:00:00 2001 From: Noah Stride Date: Wed, 8 Jan 2025 09:59:15 +0000 Subject: [PATCH 01/45] Encode Join Attributes in Bot Certificates (#49426) * Persist Join Attributes in X509 Cert * Use proto names when encoding * Fix kube tests * Fix other kube tests * Fix interface nilness issue * Add some tests to the TLSCA package and issuer * Add more E2E style test that covers join attributes and workload id * Explain test better * Add JoinAttrs test for bots * Remove methods no longer necessary * Fix imports * Fix deprecation version * Add comment explaining why we return even on failure * Add GoDoc * Fix logger * Use auth server logger * Remove unnecessary import --- .../teleport/machineid/v1/bot_instance.pb.go | 3 + .../teleport/machineid/v1/bot_instance.proto | 4 + lib/auth/auth.go | 7 +- lib/auth/auth_with_roles.go | 3 + lib/auth/bot.go | 25 ++- lib/auth/bot_test.go | 144 ++++++++++++- lib/auth/join.go | 161 ++++++++------ lib/auth/join_azure.go | 43 ++-- lib/auth/join_iam.go | 59 ++++-- lib/auth/join_tpm.go | 19 +- .../workloadidentityv1/decision_test.go | 17 ++ .../workloadidentityv1/issuer_service.go | 1 + .../workloadidentityv1_test.go | 197 ++++++++++++++++++ lib/bitbucket/bitbucket.go | 29 ++- lib/circleci/circleci.go | 26 +-- lib/gcp/gcp.go | 32 +-- lib/githubactions/githubactions.go | 34 +-- lib/gitlab/gitlab.go | 39 ++-- lib/kube/token/validator.go | 70 +++++-- lib/kube/token/validator_test.go | 71 ++++++- lib/spacelift/spacelift.go | 30 ++- lib/terraformcloud/terraform.go | 30 ++- lib/tlsca/ca.go | 41 ++++ lib/tlsca/ca_test.go | 54 +++++ lib/tpm/validate.go | 21 +- tool/tctl/common/bots_command.go | 23 +- 26 files changed, 931 insertions(+), 252 deletions(-) diff --git a/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go b/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go index 757c72160aa17..ec0d5c2dd24d3 100644 --- a/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go +++ 
b/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go @@ -318,11 +318,14 @@ type BotInstanceStatusAuthentication struct { // Server. AuthenticatedAt *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=authenticated_at,json=authenticatedAt,proto3" json:"authenticated_at,omitempty"` // The join method used for this join or renewal. + // Deprecated: prefer using join_attrs.meta.join_method JoinMethod string `protobuf:"bytes,2,opt,name=join_method,json=joinMethod,proto3" json:"join_method,omitempty"` // The join token used for this join or renewal. This is only populated for // delegated join methods as the value for `token` join methods is sensitive. + // Deprecated: prefer using join_attrs.meta.join_token_name JoinToken string `protobuf:"bytes,3,opt,name=join_token,json=joinToken,proto3" json:"join_token,omitempty"` // The metadata sourced from the join method. + // Deprecated: prefer using join_attrs. Metadata *structpb.Struct `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` // On each renewal, this generation is incremented. For delegated join // methods, this counter is not checked during renewal. For the `token` join diff --git a/api/proto/teleport/machineid/v1/bot_instance.proto b/api/proto/teleport/machineid/v1/bot_instance.proto index 5904e8896a6bd..76a3820f2bfac 100644 --- a/api/proto/teleport/machineid/v1/bot_instance.proto +++ b/api/proto/teleport/machineid/v1/bot_instance.proto @@ -90,12 +90,16 @@ message BotInstanceStatusAuthentication { // Server. google.protobuf.Timestamp authenticated_at = 1; // The join method used for this join or renewal. + // Deprecated: prefer using join_attrs.meta.join_method string join_method = 2; // The join token used for this join or renewal. This is only populated for // delegated join methods as the value for `token` join methods is sensitive. + // Deprecated: prefer using join_attrs.meta.join_token_name string join_token = 3; // The metadata sourced from the join method. 
+ // Deprecated: prefer using join_attrs. google.protobuf.Struct metadata = 4; + // On each renewal, this generation is incremented. For delegated join // methods, this counter is not checked during renewal. For the `token` join // method, this counter is checked during renewal and the Bot is locked out if diff --git a/lib/auth/auth.go b/lib/auth/auth.go index 067cea661c7e1..82bd49e68befb 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -71,6 +71,7 @@ import ( headerv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/header/v1" mfav1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/mfa/v1" notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/internalutils/stream" "github.com/gravitational/teleport/api/metadata" "github.com/gravitational/teleport/api/types" @@ -2290,6 +2291,9 @@ type certRequest struct { // botInstanceID is the unique identifier of the bot instance associated // with this cert, if any botInstanceID string + // joinAttributes holds attributes derived from attested metadata from the + // join process, should any exist. + joinAttributes *workloadidentityv1pb.JoinAttrs } // check verifies the cert request is valid. @@ -3370,7 +3374,8 @@ func generateCert(ctx context.Context, a *Server, req certRequest, caType types. 
AssetTag: req.deviceExtensions.AssetTag, CredentialID: req.deviceExtensions.CredentialID, }, - UserType: req.user.GetUserType(), + UserType: req.user.GetUserType(), + JoinAttributes: req.joinAttributes, } var signedTLSCert []byte diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index 85cc6fe6237b1..fe50d3d0af68d 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -3453,6 +3453,9 @@ func (a *ServerWithRoles) generateUserCerts(ctx context.Context, req proto.UserC // `updateBotInstance()` is called below, and this (empty) value will be // overridden. botInstanceID: a.context.Identity.GetIdentity().BotInstanceID, + // Propagate any join attributes from the current identity to the new + // identity. + joinAttributes: a.context.Identity.GetIdentity().JoinAttributes, } if user.GetName() != a.context.User.GetName() { diff --git a/lib/auth/bot.go b/lib/auth/bot.go index 104518ea7687e..c08ae5f1d7580 100644 --- a/lib/auth/bot.go +++ b/lib/auth/bot.go @@ -31,6 +31,7 @@ import ( "github.com/gravitational/teleport/api/client/proto" headerv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/header/v1" machineidv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/machineid/v1" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" apievents "github.com/gravitational/teleport/api/types/events" apiutils "github.com/gravitational/teleport/api/utils" @@ -315,7 +316,7 @@ func (a *Server) updateBotInstance( if templateAuthRecord != nil { authRecord.JoinToken = templateAuthRecord.JoinToken authRecord.JoinMethod = templateAuthRecord.JoinMethod - authRecord.Metadata = templateAuthRecord.Metadata + authRecord.JoinAttrs = templateAuthRecord.JoinAttrs } // An empty bot instance most likely means a bot is rejoining after an @@ -493,6 +494,7 @@ func (a *Server) generateInitialBotCerts( expires time.Time, renewable bool, 
initialAuth *machineidv1pb.BotInstanceStatusAuthentication, existingInstanceID string, currentIdentityGeneration int32, + joinAttrs *workloadidentityv1pb.JoinAttrs, ) (*proto.Certs, string, error) { var err error @@ -535,16 +537,17 @@ func (a *Server) generateInitialBotCerts( // Generate certificate certReq := certRequest{ - user: userState, - ttl: expires.Sub(a.GetClock().Now()), - sshPublicKey: sshPubKey, - tlsPublicKey: tlsPubKey, - checker: checker, - traits: accessInfo.Traits, - renewable: renewable, - includeHostCA: true, - loginIP: loginIP, - botName: botName, + user: userState, + ttl: expires.Sub(a.GetClock().Now()), + sshPublicKey: sshPubKey, + tlsPublicKey: tlsPubKey, + checker: checker, + traits: accessInfo.Traits, + renewable: renewable, + includeHostCA: true, + loginIP: loginIP, + botName: botName, + joinAttributes: joinAttrs, } if existingInstanceID == "" { diff --git a/lib/auth/bot_test.go b/lib/auth/bot_test.go index ae4ddb14136b9..2e019ffa7123e 100644 --- a/lib/auth/bot_test.go +++ b/lib/auth/bot_test.go @@ -42,6 +42,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/crypto/ssh" "google.golang.org/grpc" + "google.golang.org/protobuf/testing/protocmp" "github.com/gravitational/teleport" apiclient "github.com/gravitational/teleport/api/client" @@ -49,10 +50,12 @@ import ( "github.com/gravitational/teleport/api/client/webclient" headerv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/header/v1" machineidv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/machineid/v1" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/metadata" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/events" "github.com/gravitational/teleport/api/utils/keys" + "github.com/gravitational/teleport/integrations/lib/testing/fakejoin" "github.com/gravitational/teleport/lib/auth/authclient" 
"github.com/gravitational/teleport/lib/auth/join" "github.com/gravitational/teleport/lib/auth/machineid/machineidv1" @@ -216,6 +219,146 @@ func TestRegisterBotCertificateGenerationCheck(t *testing.T) { } } +// TestBotJoinAttrs_Kubernetes validates that a bot can join using the +// Kubernetes join method and that the correct join attributes are encoded in +// the resulting bot cert, and, that when this cert is used to produce role +// certificates, the correct attributes are encoded in the role cert. +// +// Whilst this specifically tests the Kubernetes join method, it tests by proxy +// the implementation for most of the join methods. +func TestBotJoinAttrs_Kubernetes(t *testing.T) { + t.Parallel() + + srv := newTestTLSServer(t) + ctx := context.Background() + + role, err := CreateRole(ctx, srv.Auth(), "example", types.RoleSpecV6{}) + require.NoError(t, err) + + // Create a new bot. + client, err := srv.NewClient(TestAdmin()) + require.NoError(t, err) + bot, err := client.BotServiceClient().CreateBot(ctx, &machineidv1pb.CreateBotRequest{ + Bot: &machineidv1pb.Bot{ + Metadata: &headerv1.Metadata{ + Name: "test", + }, + Spec: &machineidv1pb.BotSpec{ + Roles: []string{"example"}, + }, + }, + }) + require.NoError(t, err) + + k8s, err := fakejoin.NewKubernetesSigner(srv.Clock()) + require.NoError(t, err) + jwks, err := k8s.GetMarshaledJWKS() + require.NoError(t, err) + fakePSAT, err := k8s.SignServiceAccountJWT( + "my-pod", + "my-namespace", + "my-service-account", + srv.ClusterName(), + ) + require.NoError(t, err) + + tok, err := types.NewProvisionTokenFromSpec( + "my-k8s-token", + time.Time{}, + types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleBot}, + JoinMethod: types.JoinMethodKubernetes, + BotName: bot.Metadata.Name, + Kubernetes: &types.ProvisionTokenSpecV2Kubernetes{ + Type: types.KubernetesJoinTypeStaticJWKS, + StaticJWKS: &types.ProvisionTokenSpecV2Kubernetes_StaticJWKSConfig{ + JWKS: jwks, + }, + Allow: 
[]*types.ProvisionTokenSpecV2Kubernetes_Rule{ + { + ServiceAccount: "my-namespace:my-service-account", + }, + }, + }, + }, + ) + require.NoError(t, err) + require.NoError(t, client.CreateToken(ctx, tok)) + + result, err := join.Register(ctx, join.RegisterParams{ + Token: tok.GetName(), + JoinMethod: types.JoinMethodKubernetes, + ID: state.IdentityID{ + Role: types.RoleBot, + }, + AuthServers: []utils.NetAddr{*utils.MustParseAddr(srv.Addr().String())}, + KubernetesReadFileFunc: func(name string) ([]byte, error) { + return []byte(fakePSAT), nil + }, + }) + require.NoError(t, err) + + // Validate correct join attributes are encoded. + cert, err := tlsca.ParseCertificatePEM(result.Certs.TLS) + require.NoError(t, err) + ident, err := tlsca.FromSubject(cert.Subject, cert.NotAfter) + require.NoError(t, err) + wantAttrs := &workloadidentityv1pb.JoinAttrs{ + Meta: &workloadidentityv1pb.JoinAttrsMeta{ + JoinTokenName: tok.GetName(), + JoinMethod: string(types.JoinMethodKubernetes), + }, + Kubernetes: &workloadidentityv1pb.JoinAttrsKubernetes{ + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Namespace: "my-namespace", + Name: "my-service-account", + }, + Pod: &workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: "my-pod", + }, + Subject: "system:serviceaccount:my-namespace:my-service-account", + }, + } + require.Empty(t, cmp.Diff( + ident.JoinAttributes, + wantAttrs, + protocmp.Transform(), + )) + + // Now, try to produce a role certificate using the bot cert, to ensure + // that the join attributes are correctly propagated. 
+ privateKeyPEM, err := keys.MarshalPrivateKey(result.PrivateKey) + require.NoError(t, err) + tlsCert, err := tls.X509KeyPair(result.Certs.TLS, privateKeyPEM) + require.NoError(t, err) + sshPub, err := ssh.NewPublicKey(result.PrivateKey.Public()) + require.NoError(t, err) + tlsPub, err := keys.MarshalPublicKey(result.PrivateKey.Public()) + require.NoError(t, err) + botClient := srv.NewClientWithCert(tlsCert) + roleCerts, err := botClient.GenerateUserCerts(ctx, proto.UserCertsRequest{ + SSHPublicKey: ssh.MarshalAuthorizedKey(sshPub), + TLSPublicKey: tlsPub, + Username: bot.Status.UserName, + RoleRequests: []string{ + role.GetName(), + }, + UseRoleRequests: true, + Expires: srv.Clock().Now().Add(time.Hour), + }) + require.NoError(t, err) + + roleCert, err := tlsca.ParseCertificatePEM(roleCerts.TLS) + require.NoError(t, err) + roleIdent, err := tlsca.FromSubject(roleCert.Subject, roleCert.NotAfter) + require.NoError(t, err) + require.Empty(t, cmp.Diff( + roleIdent.JoinAttributes, + wantAttrs, + protocmp.Transform(), + )) +} + // TestRegisterBotInstance tests that bot instances are created properly on join func TestRegisterBotInstance(t *testing.T) { t.Parallel() @@ -282,7 +425,6 @@ func TestRegisterBotInstance(t *testing.T) { require.Equal(t, int32(1), ia.Generation) require.Equal(t, string(types.JoinMethodToken), ia.JoinMethod) require.Equal(t, token.GetSafeName(), ia.JoinToken) - // The latest authentications field should contain the same record (and // only that record.) 
require.Len(t, botInstance.GetStatus().LatestAuthentications, 1) diff --git a/lib/auth/join.go b/lib/auth/join.go index 00d4f8847f1e9..ad92db5eb3a0d 100644 --- a/lib/auth/join.go +++ b/lib/auth/join.go @@ -22,6 +22,7 @@ import ( "context" "crypto/rand" "encoding/base64" + "encoding/json" "log/slog" "net" "slices" @@ -34,6 +35,7 @@ import ( "github.com/gravitational/teleport/api/client/proto" machineidv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/machineid/v1" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" apievents "github.com/gravitational/teleport/api/types/events" "github.com/gravitational/teleport/lib/auth/machineid/machineidv1" @@ -104,12 +106,6 @@ func (a *Server) checkTokenJoinRequestCommon(ctx context.Context, req *types.Reg return provisionToken, nil } -type joinAttributeSourcer interface { - // JoinAuditAttributes returns a series of attributes that can be inserted into - // audit events related to a specific join. - JoinAuditAttributes() (map[string]interface{}, error) -} - func setRemoteAddrFromContext(ctx context.Context, req *types.RegisterUsingTokenRequest) error { var addr string if clientIP, err := authz.ClientSrcAddrFromContext(ctx); err == nil { @@ -132,7 +128,7 @@ func (a *Server) handleJoinFailure( ctx context.Context, origErr error, pt types.ProvisionToken, - attributeSource joinAttributeSourcer, + rawJoinAttrs any, req *types.RegisterUsingTokenRequest, ) { attrs := []slog.Attr{slog.Any("error", origErr)} @@ -145,19 +141,13 @@ func (a *Server) handleJoinFailure( }...) } - // Fetch and encode attributes if they are available. 
- var attributesProto *apievents.Struct - if attributeSource != nil { - var err error - attributes, err := attributeSource.JoinAuditAttributes() - if err != nil { - a.logger.WarnContext(ctx, "Unable to fetch join attributes from join method", "error", err) - } - attrs = append(attrs, slog.Any("attributes", attributes)) - attributesProto, err = apievents.EncodeMap(attributes) - if err != nil { - a.logger.WarnContext(ctx, "Unable to encode join attributes for audit event", "error", err) - } + // Fetch and encode rawJoinAttrs if they are available. + attributesStruct, err := rawJoinAttrsToStruct(rawJoinAttrs) + if err != nil { + a.logger.WarnContext(ctx, "Unable to fetch join attributes from join method", "error", err) + } + if attributesStruct != nil { + attrs = append(attrs, slog.Any("attributes", attributesStruct)) } // Add log fields from token if available. @@ -179,7 +169,7 @@ func (a *Server) handleJoinFailure( Code: events.BotJoinFailureCode, }, Status: status, - Attributes: attributesProto, + Attributes: attributesStruct, ConnectionMetadata: apievents.ConnectionMetadata{ RemoteAddr: req.RemoteAddr, }, @@ -197,7 +187,7 @@ func (a *Server) handleJoinFailure( Code: events.InstanceJoinFailureCode, }, Status: status, - Attributes: attributesProto, + Attributes: attributesStruct, } if pt != nil { instanceJoinEvent.Method = string(pt.GetJoinMethod()) @@ -228,12 +218,13 @@ func (a *Server) handleJoinFailure( // If the token includes a specific join method, the rules for that join method // will be checked. func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsingTokenRequest) (certs *proto.Certs, err error) { - var joinAttributeSrc joinAttributeSourcer + attrs := &workloadidentityv1pb.JoinAttrs{} + var rawClaims any var provisionToken types.ProvisionToken defer func() { // Emit a log message and audit event on join failure. 
if err != nil { - a.handleJoinFailure(ctx, err, provisionToken, joinAttributeSrc, req) + a.handleJoinFailure(ctx, err, provisionToken, rawClaims, req) } }() @@ -255,7 +246,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodGitHub: claims, err := a.checkGitHubJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Github = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -263,7 +255,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodGitLab: claims, err := a.checkGitLabJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Gitlab = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -271,7 +264,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodCircleCI: claims, err := a.checkCircleCIJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Circleci = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -279,7 +273,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodKubernetes: claims, err := a.checkKubernetesJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Kubernetes = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -287,7 +282,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodGCP: claims, err := a.checkGCPJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Gcp = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -295,7 +291,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodSpacelift: claims, err := a.checkSpaceliftJoinRequest(ctx, 
req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Spacelift = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -303,7 +300,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodTerraformCloud: claims, err := a.checkTerraformCloudJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.TerraformCloud = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -311,7 +309,8 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin case types.JoinMethodBitbucket: claims, err := a.checkBitbucketJoinRequest(ctx, req) if claims != nil { - joinAttributeSrc = claims + rawClaims = claims + attrs.Bitbucket = claims.JoinAttrs() } if err != nil { return nil, trace.Wrap(err) @@ -334,10 +333,16 @@ func (a *Server) RegisterUsingToken(ctx context.Context, req *types.RegisterUsin // With all elements of the token validated, we can now generate & return // certificates. 
if req.Role == types.RoleBot { - certs, err = a.generateCertsBot(ctx, provisionToken, req, joinAttributeSrc) + certs, err = a.generateCertsBot( + ctx, + provisionToken, + req, + rawClaims, + attrs, + ) return certs, trace.Wrap(err) } - certs, err = a.generateCerts(ctx, provisionToken, req, joinAttributeSrc) + certs, err = a.generateCerts(ctx, provisionToken, req, rawClaims) return certs, trace.Wrap(err) } @@ -345,7 +350,8 @@ func (a *Server) generateCertsBot( ctx context.Context, provisionToken types.ProvisionToken, req *types.RegisterUsingTokenRequest, - joinAttributeSrc joinAttributeSourcer, + rawJoinClaims any, + attrs *workloadidentityv1pb.JoinAttrs, ) (*proto.Certs, error) { // bots use this endpoint but get a user cert // botResourceName must be set, enforced in CheckAndSetDefaults @@ -393,6 +399,27 @@ func (a *Server) generateCertsBot( RemoteAddr: req.RemoteAddr, }, } + var err error + joinEvent.Attributes, err = rawJoinAttrsToStruct(rawJoinClaims) + if err != nil { + a.logger.WarnContext( + ctx, + "Unable to encode join attributes for join audit event", + "error", err, + ) + } + + // Prepare join attributes for encoding into the X509 cert and for inclusion + // in audit logs. + if attrs == nil { + attrs = &workloadidentityv1pb.JoinAttrs{} + } + attrs.Meta = &workloadidentityv1pb.JoinAttrsMeta{ + JoinMethod: string(joinMethod), + } + if joinMethod != types.JoinMethodToken { + attrs.Meta.JoinTokenName = provisionToken.GetName() + } auth := &machineidv1pb.BotInstanceStatusAuthentication{ AuthenticatedAt: timestamppb.New(a.GetClock().Now()), @@ -404,22 +431,13 @@ func (a *Server) generateCertsBot( // TODO(nklaassen): consider logging the SSH public key as well, for now // the SSH and TLS public keys are still identical for tbot. 
PublicKey: req.PublicTLSKey, + JoinAttrs: attrs, } - if joinAttributeSrc != nil { - attributes, err := joinAttributeSrc.JoinAuditAttributes() - if err != nil { - a.logger.WarnContext(ctx, "Unable to fetch join attributes from join method", "error", err) - } - joinEvent.Attributes, err = apievents.EncodeMap(attributes) - if err != nil { - a.logger.WarnContext(ctx, "Unable to encode join attributes for audit event", "error", err) - } - - auth.Metadata, err = structpb.NewStruct(attributes) - if err != nil { - a.logger.WarnContext(ctx, "Unable to encode struct value for join metadata", "error", err) - } + // TODO(noah): In v19, we can drop writing to the deprecated Metadata field. + auth.Metadata, err = rawJoinAttrsToGoogleStruct(rawJoinClaims) + if err != nil { + a.logger.WarnContext(ctx, "Unable to encode struct value for join metadata", "error", err) } certs, botInstanceID, err := a.generateInitialBotCerts( @@ -434,6 +452,7 @@ func (a *Server) generateCertsBot( auth, req.BotInstanceID, req.BotGeneration, + attrs, ) if err != nil { return nil, trace.Wrap(err) @@ -465,7 +484,7 @@ func (a *Server) generateCerts( ctx context.Context, provisionToken types.ProvisionToken, req *types.RegisterUsingTokenRequest, - joinAttributeSrc joinAttributeSourcer, + rawJoinClaims any, ) (*proto.Certs, error) { if req.Expires != nil { return nil, trace.BadParameter("'expires' cannot be set on join for non-bot certificates") @@ -534,15 +553,9 @@ func (a *Server) generateCerts( RemoteAddr: req.RemoteAddr, }, } - if joinAttributeSrc != nil { - attributes, err := joinAttributeSrc.JoinAuditAttributes() - if err != nil { - a.logger.WarnContext(ctx, "Unable to fetch join attributes from join method", "error", err) - } - joinEvent.Attributes, err = apievents.EncodeMap(attributes) - if err != nil { - a.logger.WarnContext(ctx, "Unable to encode join attributes for audit event", "error", err) - } + joinEvent.Attributes, err = rawJoinAttrsToStruct(rawJoinClaims) + if err != nil { + 
a.logger.WarnContext(ctx, "Unable to fetch join attributes from join method", "error", err) } if err := a.emitter.EmitAuditEvent(ctx, joinEvent); err != nil { a.logger.WarnContext(ctx, "Failed to emit instance join event", "error", err) @@ -550,6 +563,36 @@ func (a *Server) generateCerts( return certs, nil } +func rawJoinAttrsToStruct(in any) (*apievents.Struct, error) { + if in == nil { + return nil, nil + } + attrBytes, err := json.Marshal(in) + if err != nil { + return nil, trace.Wrap(err, "marshaling join attributes") + } + out := &apievents.Struct{} + if err := out.UnmarshalJSON(attrBytes); err != nil { + return nil, trace.Wrap(err, "unmarshaling join attributes") + } + return out, nil +} + +func rawJoinAttrsToGoogleStruct(in any) (*structpb.Struct, error) { + if in == nil { + return nil, nil + } + attrBytes, err := json.Marshal(in) + if err != nil { + return nil, trace.Wrap(err, "marshaling join attributes") + } + out := &structpb.Struct{} + if err := out.UnmarshalJSON(attrBytes); err != nil { + return nil, trace.Wrap(err, "unmarshaling join attributes") + } + return out, nil +} + func generateChallenge(encoding *base64.Encoding, length int) (string, error) { // read crypto-random bytes to generate the challenge challengeRawBytes := make([]byte, length) diff --git a/lib/auth/join_azure.go b/lib/auth/join_azure.go index 70ae17918b7fa..df5a1632e05e0 100644 --- a/lib/auth/join_azure.go +++ b/lib/auth/join_azure.go @@ -38,6 +38,7 @@ import ( "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/client/proto" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" "github.com/gravitational/teleport/lib/utils" @@ -312,37 +313,49 @@ func azureResourceGroupIsAllowed(allowedResourceGroups []string, vmResourceGroup return false } -func (a *Server) checkAzureRequest(ctx context.Context, 
challenge string, req *proto.RegisterUsingAzureMethodRequest, cfg *azureRegisterConfig) error { +func azureJoinToAttrs(vm *azure.VirtualMachine) *workloadidentityv1pb.JoinAttrsAzure { + return &workloadidentityv1pb.JoinAttrsAzure{ + Subscription: vm.Subscription, + ResourceGroup: vm.ResourceGroup, + } +} + +func (a *Server) checkAzureRequest( + ctx context.Context, + challenge string, + req *proto.RegisterUsingAzureMethodRequest, + cfg *azureRegisterConfig, +) (*workloadidentityv1pb.JoinAttrsAzure, error) { requestStart := a.clock.Now() tokenName := req.RegisterUsingTokenRequest.Token provisionToken, err := a.GetToken(ctx, tokenName) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } if provisionToken.GetJoinMethod() != types.JoinMethodAzure { - return trace.AccessDenied("this token does not support the Azure join method") + return nil, trace.AccessDenied("this token does not support the Azure join method") + } + token, ok := provisionToken.(*types.ProvisionTokenV2) + if !ok { + return nil, trace.BadParameter("azure join method only supports ProvisionTokenV2, '%T' was provided", provisionToken) } subID, vmID, err := parseAndVerifyAttestedData(ctx, req.AttestedData, challenge, cfg.certificateAuthorities) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } vm, err := verifyVMIdentity(ctx, cfg, req.AccessToken, subID, vmID, requestStart) if err != nil { - return trace.Wrap(err) - } - - token, ok := provisionToken.(*types.ProvisionTokenV2) - if !ok { - return trace.BadParameter("azure join method only supports ProvisionTokenV2, '%T' was provided", provisionToken) + return nil, trace.Wrap(err) } + attrs := azureJoinToAttrs(vm) if err := checkAzureAllowRules(vm, token.GetName(), token.Spec.Azure.Allow); err != nil { - return trace.Wrap(err) + return attrs, trace.Wrap(err) } - return nil + return attrs, nil } func generateAzureChallenge() (string, error) { @@ -397,7 +410,8 @@ func (a *Server) RegisterUsingAzureMethodWithOpts( 
return nil, trace.Wrap(err) } - if err := a.checkAzureRequest(ctx, challenge, req, cfg); err != nil { + joinAttrs, err := a.checkAzureRequest(ctx, challenge, req, cfg) + if err != nil { return nil, trace.Wrap(err) } @@ -407,6 +421,9 @@ func (a *Server) RegisterUsingAzureMethodWithOpts( provisionToken, req.RegisterUsingTokenRequest, nil, + &workloadidentityv1pb.JoinAttrs{ + Azure: joinAttrs, + }, ) return certs, trace.Wrap(err) } diff --git a/lib/auth/join_iam.go b/lib/auth/join_iam.go index 7b284733bee1c..9ecfedd07bebd 100644 --- a/lib/auth/join_iam.go +++ b/lib/auth/join_iam.go @@ -34,6 +34,7 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/client/proto" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/auth/join/iam" "github.com/gravitational/teleport/lib/utils" @@ -172,6 +173,18 @@ type awsIdentity struct { Arn string `json:"Arn"` } +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. +func (c *awsIdentity) JoinAttrs() *workloadidentityv1pb.JoinAttrsAWSIAM { + attrs := &workloadidentityv1pb.JoinAttrsAWSIAM{ + Account: c.Account, + Arn: c.Arn, + } + + return attrs +} + // getCallerIdentityReponse is used for JSON parsing type getCallerIdentityResponse struct { GetCallerIdentityResult awsIdentity `json:"GetCallerIdentityResult"` @@ -260,41 +273,48 @@ func checkIAMAllowRules(identity *awsIdentity, token string, allowRules []*types // checkIAMRequest checks if the given request satisfies the token rules and // included the required challenge. 
-func (a *Server) checkIAMRequest(ctx context.Context, challenge string, req *proto.RegisterUsingIAMMethodRequest, cfg *iamRegisterConfig) error { +// +// If the joining entity presents a valid IAM identity, this will be returned, +// even if the identity does not match the token's allow rules. This is to +// support inclusion in audit logs. +func (a *Server) checkIAMRequest(ctx context.Context, challenge string, req *proto.RegisterUsingIAMMethodRequest, cfg *iamRegisterConfig) (*awsIdentity, error) { tokenName := req.RegisterUsingTokenRequest.Token provisionToken, err := a.GetToken(ctx, tokenName) if err != nil { - return trace.Wrap(err, "getting token") + return nil, trace.Wrap(err, "getting token") } if provisionToken.GetJoinMethod() != types.JoinMethodIAM { - return trace.AccessDenied("this token does not support the IAM join method") + return nil, trace.AccessDenied("this token does not support the IAM join method") } // parse the incoming http request to the sts:GetCallerIdentity endpoint identityRequest, err := parseSTSRequest(req.StsIdentityRequest) if err != nil { - return trace.Wrap(err, "parsing STS request") + return nil, trace.Wrap(err, "parsing STS request") } // validate that the host, method, and headers are correct and the expected // challenge is included in the signed portion of the request if err := validateSTSIdentityRequest(identityRequest, challenge, cfg); err != nil { - return trace.Wrap(err, "validating STS request") + return nil, trace.Wrap(err, "validating STS request") } // send the signed request to the public AWS API and get the node identity // from the response identity, err := executeSTSIdentityRequest(ctx, a.httpClientForAWSSTS, identityRequest) if err != nil { - return trace.Wrap(err, "executing STS request") + return nil, trace.Wrap(err, "executing STS request") } // check that the node identity matches an allow rule for this token if err := checkIAMAllowRules(identity, provisionToken.GetName(), provisionToken.GetAllowRules()); 
err != nil { - return trace.Wrap(err, "checking allow rules") + // We return the identity since it's "validated" but does not match the + // rules. This allows us to include it in a failed join audit event + // as additional context to help the user understand why the join failed. + return identity, trace.Wrap(err, "checking allow rules") } - return nil + return identity, nil } func generateIAMChallenge() (string, error) { @@ -341,10 +361,13 @@ func (a *Server) RegisterUsingIAMMethodWithOpts( ) (certs *proto.Certs, err error) { var provisionToken types.ProvisionToken var joinRequest *types.RegisterUsingTokenRequest + var joinFailureMetadata any defer func() { // Emit a log message and audit event on join failure. if err != nil { - a.handleJoinFailure(ctx, err, provisionToken, nil, joinRequest) + a.handleJoinFailure( + ctx, err, provisionToken, joinFailureMetadata, joinRequest, + ) } }() @@ -375,15 +398,27 @@ func (a *Server) RegisterUsingIAMMethodWithOpts( } // check that the GetCallerIdentity request is valid and matches the token - if err := a.checkIAMRequest(ctx, challenge, req, cfg); err != nil { + verifiedIdentity, err := a.checkIAMRequest(ctx, challenge, req, cfg) + if verifiedIdentity != nil { + joinFailureMetadata = verifiedIdentity + } + if err != nil { return nil, trace.Wrap(err, "checking iam request") } if req.RegisterUsingTokenRequest.Role == types.RoleBot { - certs, err := a.generateCertsBot(ctx, provisionToken, req.RegisterUsingTokenRequest, nil) + certs, err := a.generateCertsBot( + ctx, + provisionToken, + req.RegisterUsingTokenRequest, + verifiedIdentity, + &workloadidentityv1pb.JoinAttrs{ + Iam: verifiedIdentity.JoinAttrs(), + }, + ) return certs, trace.Wrap(err, "generating bot certs") } - certs, err = a.generateCerts(ctx, provisionToken, req.RegisterUsingTokenRequest, nil) + certs, err = a.generateCerts(ctx, provisionToken, req.RegisterUsingTokenRequest, verifiedIdentity) return certs, trace.Wrap(err, "generating certs") } diff --git 
a/lib/auth/join_tpm.go b/lib/auth/join_tpm.go index 12463e8ecd811..df2e6b4e4cbcc 100644 --- a/lib/auth/join_tpm.go +++ b/lib/auth/join_tpm.go @@ -28,6 +28,7 @@ import ( "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/client/proto" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/tpm" @@ -39,11 +40,13 @@ func (a *Server) RegisterUsingTPMMethod( solveChallenge client.RegisterTPMChallengeResponseFunc, ) (_ *proto.Certs, err error) { var provisionToken types.ProvisionToken - var attributeSrc joinAttributeSourcer + var joinFailureMetadata any defer func() { // Emit a log message and audit event on join failure. if err != nil { - a.handleJoinFailure(ctx, err, provisionToken, attributeSrc, initReq.JoinRequest) + a.handleJoinFailure( + ctx, err, provisionToken, joinFailureMetadata, initReq.JoinRequest, + ) } }() @@ -97,10 +100,12 @@ func (a *Server) RegisterUsingTPMMethod( return solution.Solution, nil }, }) + if validatedEK != nil { + joinFailureMetadata = validatedEK + } if err != nil { return nil, trace.Wrap(err, "validating TPM EK") } - attributeSrc = validatedEK if err := checkTPMAllowRules(validatedEK, ptv2.Spec.TPM.Allow); err != nil { return nil, trace.Wrap(err) @@ -108,7 +113,13 @@ func (a *Server) RegisterUsingTPMMethod( if initReq.JoinRequest.Role == types.RoleBot { certs, err := a.generateCertsBot( - ctx, ptv2, initReq.JoinRequest, validatedEK, + ctx, + ptv2, + initReq.JoinRequest, + validatedEK, + &workloadidentityv1pb.JoinAttrs{ + Tpm: validatedEK.JoinAttrs(), + }, ) return certs, trace.Wrap(err, "generating certs for bot") } diff --git a/lib/auth/machineid/workloadidentityv1/decision_test.go b/lib/auth/machineid/workloadidentityv1/decision_test.go index e8cb267bb0879..5d00bf7595669 100644 --- 
a/lib/auth/machineid/workloadidentityv1/decision_test.go +++ b/lib/auth/machineid/workloadidentityv1/decision_test.go @@ -95,6 +95,23 @@ func Test_getFieldStringValue(t *testing.T) { want: "jeff", requireErr: require.NoError, }, + { + // This test ensures that the proto name (e.g service_account) is + // used instead of the Go name (e.g serviceAccount). + name: "underscored", + in: &workloadidentityv1pb.Attrs{ + Join: &workloadidentityv1pb.JoinAttrs{ + Kubernetes: &workloadidentityv1pb.JoinAttrsKubernetes{ + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Namespace: "default", + }, + }, + }, + }, + attr: "join.kubernetes.service_account.namespace", + want: "default", + requireErr: require.NoError, + }, { name: "bool", in: &workloadidentityv1pb.Attrs{ diff --git a/lib/auth/machineid/workloadidentityv1/issuer_service.go b/lib/auth/machineid/workloadidentityv1/issuer_service.go index eb75befe32b0b..6842ae01632ec 100644 --- a/lib/auth/machineid/workloadidentityv1/issuer_service.go +++ b/lib/auth/machineid/workloadidentityv1/issuer_service.go @@ -135,6 +135,7 @@ func (s *IssuanceService) deriveAttrs( BotName: authzCtx.Identity.GetIdentity().BotName, Labels: authzCtx.User.GetAllLabels(), }, + Join: authzCtx.Identity.GetIdentity().JoinAttributes, } return attrs, nil diff --git a/lib/auth/machineid/workloadidentityv1/workloadidentityv1_test.go b/lib/auth/machineid/workloadidentityv1/workloadidentityv1_test.go index 3f615c2749c89..e5f23dc96216c 100644 --- a/lib/auth/machineid/workloadidentityv1/workloadidentityv1_test.go +++ b/lib/auth/machineid/workloadidentityv1/workloadidentityv1_test.go @@ -19,6 +19,7 @@ package workloadidentityv1_test import ( "context" "crypto" + "crypto/tls" "crypto/x509" "errors" "fmt" @@ -34,23 +35,32 @@ import ( "github.com/gravitational/trace" "github.com/jonboulle/clockwork" "github.com/stretchr/testify/require" + "golang.org/x/crypto/ssh" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/testing/protocmp" 
"google.golang.org/protobuf/types/known/durationpb" + apiproto "github.com/gravitational/teleport/api/client/proto" headerv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/header/v1" + machineidv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/machineid/v1" workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/events" + apiutils "github.com/gravitational/teleport/api/utils" + "github.com/gravitational/teleport/api/utils/keys" + "github.com/gravitational/teleport/integrations/lib/testing/fakejoin" "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/auth/join" "github.com/gravitational/teleport/lib/auth/machineid/workloadidentityv1/experiment" + "github.com/gravitational/teleport/lib/auth/state" "github.com/gravitational/teleport/lib/cryptosuites" libevents "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/events/eventstest" libjwt "github.com/gravitational/teleport/lib/jwt" "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/utils" ) func TestMain(m *testing.M) { @@ -137,6 +147,193 @@ func newIssuanceTestPack(t *testing.T, ctx context.Context) *issuanceTestPack { } } +// TestIssueWorkloadIdentityE2E performs a more E2E test than the RPC specific +// tests in this package. The idea is to validate that the various Auth Server +// APIs necessary for a bot to join and then issue a workload identity are +// functioning correctly. 
+func TestIssueWorkloadIdentityE2E(t *testing.T) { + experimentStatus := experiment.Enabled() + defer experiment.SetEnabled(experimentStatus) + experiment.SetEnabled(true) + + ctx := context.Background() + tp := newIssuanceTestPack(t, ctx) + + role, err := types.NewRole("my-role", types.RoleSpecV6{ + Allow: types.RoleConditions{ + Rules: []types.Rule{ + types.NewRule(types.KindWorkloadIdentity, []string{types.VerbRead, types.VerbList}), + }, + WorkloadIdentityLabels: map[string]apiutils.Strings{ + "my-label": []string{"my-value"}, + }, + }, + }) + require.NoError(t, err) + + wid, err := tp.srv.Auth().CreateWorkloadIdentity(ctx, &workloadidentityv1pb.WorkloadIdentity{ + Kind: types.KindWorkloadIdentity, + Version: types.V1, + Metadata: &headerv1.Metadata{ + Name: "my-wid", + Labels: map[string]string{ + "my-label": "my-value", + }, + }, + Spec: &workloadidentityv1pb.WorkloadIdentitySpec{ + Rules: &workloadidentityv1pb.WorkloadIdentityRules{ + Allow: []*workloadidentityv1pb.WorkloadIdentityRule{ + { + Conditions: []*workloadidentityv1pb.WorkloadIdentityCondition{ + { + Attribute: "join.kubernetes.service_account.namespace", + Equals: "my-namespace", + }, + }, + }, + }, + }, + Spiffe: &workloadidentityv1pb.WorkloadIdentitySPIFFE{ + Id: "/example/{{ user.name }}/{{ join.kubernetes.service_account.namespace }}/{{ join.kubernetes.pod.name }}/{{ workload.unix.pid }}", + }, + }, + }) + require.NoError(t, err) + + bot := &machineidv1.Bot{ + Kind: types.KindBot, + Version: types.V1, + Metadata: &headerv1.Metadata{ + Name: "my-bot", + }, + Spec: &machineidv1.BotSpec{ + Roles: []string{ + role.GetName(), + }, + }, + } + + k8s, err := fakejoin.NewKubernetesSigner(tp.clock) + require.NoError(t, err) + jwks, err := k8s.GetMarshaledJWKS() + require.NoError(t, err) + fakePSAT, err := k8s.SignServiceAccountJWT( + "my-pod", + "my-namespace", + "my-service-account", + tp.srv.ClusterName(), + ) + require.NoError(t, err) + + token, err := types.NewProvisionTokenFromSpec( + 
"my-k8s-token", + time.Time{}, + types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleBot}, + JoinMethod: types.JoinMethodKubernetes, + BotName: bot.Metadata.Name, + Kubernetes: &types.ProvisionTokenSpecV2Kubernetes{ + Type: types.KubernetesJoinTypeStaticJWKS, + StaticJWKS: &types.ProvisionTokenSpecV2Kubernetes_StaticJWKSConfig{ + JWKS: jwks, + }, + Allow: []*types.ProvisionTokenSpecV2Kubernetes_Rule{ + { + ServiceAccount: "my-namespace:my-service-account", + }, + }, + }, + }, + ) + require.NoError(t, err) + + adminClient, err := tp.srv.NewClient(auth.TestAdmin()) + require.NoError(t, err) + _, err = adminClient.CreateRole(ctx, role) + require.NoError(t, err) + _, err = adminClient.BotServiceClient().CreateBot(ctx, &machineidv1.CreateBotRequest{ + Bot: bot, + }) + require.NoError(t, err) + err = adminClient.CreateToken(ctx, token) + require.NoError(t, err) + + // With the basic setup complete, we can now "fake" a join. + botCerts, err := join.Register(ctx, join.RegisterParams{ + Token: token.GetName(), + JoinMethod: types.JoinMethodKubernetes, + ID: state.IdentityID{ + Role: types.RoleBot, + }, + AuthServers: []utils.NetAddr{*utils.MustParseAddr(tp.srv.Addr().String())}, + KubernetesReadFileFunc: func(name string) ([]byte, error) { + return []byte(fakePSAT), nil + }, + }) + require.NoError(t, err) + + // We now have to actually impersonate the role cert to be able to issue + // a workload identity. 
+ privateKeyPEM, err := keys.MarshalPrivateKey(botCerts.PrivateKey) + require.NoError(t, err) + tlsCert, err := tls.X509KeyPair(botCerts.Certs.TLS, privateKeyPEM) + require.NoError(t, err) + sshPub, err := ssh.NewPublicKey(botCerts.PrivateKey.Public()) + require.NoError(t, err) + tlsPub, err := keys.MarshalPublicKey(botCerts.PrivateKey.Public()) + require.NoError(t, err) + botClient := tp.srv.NewClientWithCert(tlsCert) + certs, err := botClient.GenerateUserCerts(ctx, apiproto.UserCertsRequest{ + SSHPublicKey: ssh.MarshalAuthorizedKey(sshPub), + TLSPublicKey: tlsPub, + Username: "bot-my-bot", + RoleRequests: []string{ + role.GetName(), + }, + UseRoleRequests: true, + Expires: tp.clock.Now().Add(time.Hour), + }) + require.NoError(t, err) + roleTLSCert, err := tls.X509KeyPair(certs.TLS, privateKeyPEM) + require.NoError(t, err) + roleClient := tp.srv.NewClientWithCert(roleTLSCert) + + // Generate a keypair to generate x509 SVIDs for. + workloadKey, err := cryptosuites.GenerateKeyWithAlgorithm(cryptosuites.ECDSAP256) + require.NoError(t, err) + workloadKeyPubBytes, err := x509.MarshalPKIXPublicKey(workloadKey.Public()) + require.NoError(t, err) + // Finally, we can request the issuance of a SVID + c := workloadidentityv1pb.NewWorkloadIdentityIssuanceServiceClient( + roleClient.GetConnection(), + ) + res, err := c.IssueWorkloadIdentity(ctx, &workloadidentityv1pb.IssueWorkloadIdentityRequest{ + Name: wid.Metadata.Name, + WorkloadAttrs: &workloadidentityv1pb.WorkloadAttrs{ + Unix: &workloadidentityv1pb.WorkloadAttrsUnix{ + Pid: 123, + }, + }, + Credential: &workloadidentityv1pb.IssueWorkloadIdentityRequest_X509SvidParams{ + X509SvidParams: &workloadidentityv1pb.X509SVIDParams{ + PublicKey: workloadKeyPubBytes, + }, + }, + }) + require.NoError(t, err) + + // Perform a minimal validation of the returned credential - enough to prove + // that the returned value is a valid SVID with the SPIFFE ID we expect. + // Other tests in this package validate this more fully. 
+ x509SVID := res.GetCredential().GetX509Svid() + require.NotNil(t, x509SVID) + cert, err := x509.ParseCertificate(x509SVID.GetCert()) + require.NoError(t, err) + // Check included public key matches + require.Equal(t, workloadKey.Public(), cert.PublicKey) + require.Equal(t, "spiffe://localhost/example/bot-my-bot/my-namespace/my-pod/123", cert.URIs[0].String()) +} + func TestIssueWorkloadIdentity(t *testing.T) { experimentStatus := experiment.Enabled() defer experiment.SetEnabled(experimentStatus) diff --git a/lib/bitbucket/bitbucket.go b/lib/bitbucket/bitbucket.go index ee9923337f9e8..653d724c1a971 100644 --- a/lib/bitbucket/bitbucket.go +++ b/lib/bitbucket/bitbucket.go @@ -19,8 +19,7 @@ package bitbucket import ( - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // IDTokenClaims @@ -60,19 +59,17 @@ type IDTokenClaims struct { BranchName string `json:"branchName"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]any, error) { - res := map[string]any{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsBitbucket { + return &workloadidentityv1pb.JoinAttrsBitbucket{ + Sub: c.Sub, + StepUuid: c.StepUUID, + RepositoryUuid: c.RepositoryUUID, + PipelineUuid: c.PipelineUUID, + WorkspaceUuid: c.WorkspaceUUID, + DeploymentEnvironmentUuid: c.DeploymentEnvironmentUUID, + BranchName: c.BranchName, } - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil } diff --git a/lib/circleci/circleci.go b/lib/circleci/circleci.go index 0f0c351c5eae3..ef796322d5220 100644 --- a/lib/circleci/circleci.go +++ b/lib/circleci/circleci.go @@ -32,8 +32,7 @@ package circleci import ( "fmt" - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) const IssuerURLTemplate = "https://oidc.circleci.com/org/%s" @@ -55,20 +54,13 @@ type IDTokenClaims struct { ProjectID string `json:"oidc.circleci.com/project-id"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsCircleCI { + return &workloadidentityv1pb.JoinAttrsCircleCI{ + Sub: c.Sub, + ContextIds: c.ContextIDs, + ProjectId: c.ProjectID, } - - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil } diff --git a/lib/gcp/gcp.go b/lib/gcp/gcp.go index 4fd77ca6a4f52..a1ab7eb9daafa 100644 --- a/lib/gcp/gcp.go +++ b/lib/gcp/gcp.go @@ -19,8 +19,7 @@ package gcp import ( - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // defaultIssuerHost is the issuer for GCP ID tokens. @@ -52,20 +51,21 @@ type IDTokenClaims struct { Google Google `json:"google"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsGCP { + attrs := &workloadidentityv1pb.JoinAttrsGCP{ + ServiceAccount: c.Email, } - - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) + if c.Google.ComputeEngine.InstanceName != "" { + attrs.Gce = &workloadidentityv1pb.JoinAttrsGCPGCE{ + Project: c.Google.ComputeEngine.ProjectID, + Zone: c.Google.ComputeEngine.Zone, + Id: c.Google.ComputeEngine.InstanceID, + Name: c.Google.ComputeEngine.InstanceName, + } } - return res, nil + + return attrs } diff --git a/lib/githubactions/githubactions.go b/lib/githubactions/githubactions.go index f2921a9636d18..c2642904c6990 100644 --- a/lib/githubactions/githubactions.go +++ b/lib/githubactions/githubactions.go @@ -19,8 +19,7 @@ package githubactions import ( - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // GitHub Workload Identity @@ -101,20 +100,23 @@ type IDTokenClaims struct { Workflow string `json:"workflow"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsGitHub { + attrs := &workloadidentityv1pb.JoinAttrsGitHub{ + Sub: c.Sub, + Actor: c.Actor, + Environment: c.Environment, + Ref: c.Ref, + RefType: c.RefType, + Repository: c.Repository, + RepositoryOwner: c.RepositoryOwner, + Workflow: c.Workflow, + EventName: c.EventName, + Sha: c.SHA, + RunId: c.RunID, } - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil + return attrs } diff --git a/lib/gitlab/gitlab.go b/lib/gitlab/gitlab.go index 1129e6509d6c3..9daf1c4a68d8d 100644 --- a/lib/gitlab/gitlab.go +++ b/lib/gitlab/gitlab.go @@ -19,8 +19,7 @@ package gitlab import ( - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // GitLab Workload Identity @@ -112,20 +111,28 @@ type IDTokenClaims struct { ProjectVisibility string `json:"project_visibility"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsGitLab { + attrs := &workloadidentityv1pb.JoinAttrsGitLab{ + Sub: c.Sub, + Ref: c.Ref, + RefType: c.RefType, + RefProtected: c.RefProtected == "true", + NamespacePath: c.NamespacePath, + ProjectPath: c.ProjectPath, + UserLogin: c.UserLogin, + UserEmail: c.UserEmail, + PipelineId: c.PipelineID, + Environment: c.Environment, + EnvironmentProtected: c.EnvironmentProtected == "true", + RunnerId: int64(c.RunnerID), + RunnerEnvironment: c.RunnerEnvironment, + Sha: c.SHA, + CiConfigRefUri: c.CIConfigRefURI, + CiConfigSha: c.CIConfigSHA, } - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil + return attrs } diff --git a/lib/kube/token/validator.go b/lib/kube/token/validator.go index 056b5ee1def0d..0d88af8d46735 100644 --- a/lib/kube/token/validator.go +++ b/lib/kube/token/validator.go @@ -29,13 +29,13 @@ import ( "github.com/go-jose/go-jose/v3" josejwt "github.com/go-jose/go-jose/v3/jwt" "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils" ) @@ -60,24 +60,14 @@ type ValidationResult struct { // This will be prepended with `system:serviceaccount:` for service // accounts. Username string `json:"username"` + attrs *workloadidentityv1pb.JoinAttrsKubernetes } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. 
-func (c *ValidationResult) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - Squash: true, - }) - if err != nil { - return nil, trace.Wrap(err) - } - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. +func (c *ValidationResult) JoinAttrs() *workloadidentityv1pb.JoinAttrsKubernetes { + return c.attrs } // TokenReviewValidator validates a Kubernetes Service Account JWT using the @@ -180,8 +170,11 @@ func (v *TokenReviewValidator) Validate(ctx context.Context, token, clusterName // Check the Username is a service account. // A user token would not match rules anyway, but we can produce a more relevant error message here. - if !strings.HasPrefix(reviewResult.Status.User.Username, ServiceAccountNamePrefix) { - return nil, trace.BadParameter("token user is not a service account: %s", reviewResult.Status.User.Username) + namespace, serviceAccount, err := serviceAccountFromUsername( + reviewResult.Status.User.Username, + ) + if err != nil { + return nil, trace.Wrap(err) } if !slices.Contains(reviewResult.Status.User.Groups, serviceAccountGroup) { @@ -203,20 +196,47 @@ func (v *TokenReviewValidator) Validate(ctx context.Context, token, clusterName // We know if the token is bound to a pod if its name is in the Extra userInfo. // If the token is not bound while Kubernetes supports bound tokens we abort. 
- if _, ok := reviewResult.Status.User.Extra[extraDataPodNameField]; !ok && boundTokenSupport { + podName, podNamePresent := reviewResult.Status.User.Extra[extraDataPodNameField] + if !podNamePresent && boundTokenSupport { return nil, trace.BadParameter( "legacy SA tokens are not accepted as kubernetes version %s supports bound tokens", kubeVersion.String(), ) } + attrs := &workloadidentityv1pb.JoinAttrsKubernetes{ + Subject: reviewResult.Status.User.Username, + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Name: serviceAccount, + Namespace: namespace, + }, + } + if podNamePresent && len(podName) == 1 { + attrs.Pod = &workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: podName[0], + } + } + return &ValidationResult{ Raw: reviewResult.Status, Type: types.KubernetesJoinTypeInCluster, Username: reviewResult.Status.User.Username, + attrs: attrs, }, nil } +func serviceAccountFromUsername(username string) (namespace, name string, err error) { + cut, hasPrefix := strings.CutPrefix(username, ServiceAccountNamePrefix+":") + if !hasPrefix { + return "", "", trace.BadParameter("token user is not a service account: %s", username) + } + parts := strings.Split(cut, ":") + if len(parts) != 2 { + return "", "", trace.BadParameter("token user has malformed service account name: %s", username) + } + return parts[0], parts[1], nil +} + func kubernetesSupportsBoundTokens(gitVersion string) (bool, error) { kubeVersion, err := version.ParseSemantic(gitVersion) if err != nil { @@ -319,5 +339,15 @@ func ValidateTokenWithJWKS( Raw: claims, Type: types.KubernetesJoinTypeStaticJWKS, Username: claims.Subject, + attrs: &workloadidentityv1pb.JoinAttrsKubernetes{ + Subject: claims.Subject, + Pod: &workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: claims.Kubernetes.Pod.Name, + }, + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Name: claims.Kubernetes.ServiceAccount.Name, + Namespace: claims.Kubernetes.Namespace, + }, + }, }, nil } diff 
--git a/lib/kube/token/validator_test.go b/lib/kube/token/validator_test.go index 49054df1e47ee..70d68fddb766d 100644 --- a/lib/kube/token/validator_test.go +++ b/lib/kube/token/validator_test.go @@ -26,9 +26,12 @@ import ( "github.com/go-jose/go-jose/v3" "github.com/go-jose/go-jose/v3/jwt" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/gravitational/trace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" v1 "k8s.io/api/authentication/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/version" @@ -37,6 +40,7 @@ import ( "k8s.io/client-go/kubernetes/fake" ctest "k8s.io/client-go/testing" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cryptosuites" ) @@ -168,6 +172,7 @@ func TestIDTokenValidator_Validate(t *testing.T) { review *v1.TokenReview kubeVersion *version.Info wantResult *ValidationResult + wantAttrs *workloadidentityv1pb.JoinAttrsKubernetes clusterAudiences []string expectedAudiences []string expectedError error @@ -196,6 +201,16 @@ func TestIDTokenValidator_Validate(t *testing.T) { Username: "system:serviceaccount:namespace:my-service-account", // Raw will be filled in during test run to value of review }, + wantAttrs: &workloadidentityv1pb.JoinAttrsKubernetes{ + Subject: "system:serviceaccount:namespace:my-service-account", + Pod: &workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: "podA", + }, + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Name: "my-service-account", + Namespace: "namespace", + }, + }, kubeVersion: &boundTokenKubernetesVersion, expectedError: nil, // As the cluster doesn't have default audiences, we should not set @@ -226,6 +241,16 @@ func TestIDTokenValidator_Validate(t *testing.T) { Username: 
"system:serviceaccount:namespace:my-service-account", // Raw will be filled in during test run to value of review }, + wantAttrs: &workloadidentityv1pb.JoinAttrsKubernetes{ + Subject: "system:serviceaccount:namespace:my-service-account", + Pod: &workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: "podA", + }, + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Name: "my-service-account", + Namespace: "namespace", + }, + }, kubeVersion: &boundTokenKubernetesVersion, expectedError: nil, clusterAudiences: defaultKubeAudiences, @@ -253,6 +278,13 @@ func TestIDTokenValidator_Validate(t *testing.T) { Username: "system:serviceaccount:namespace:my-service-account", // Raw will be filled in during test run to value of review }, + wantAttrs: &workloadidentityv1pb.JoinAttrsKubernetes{ + Subject: "system:serviceaccount:namespace:my-service-account", + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Name: "my-service-account", + Namespace: "namespace", + }, + }, kubeVersion: &legacyTokenKubernetesVersion, expectedError: nil, }, @@ -352,7 +384,19 @@ func TestIDTokenValidator_Validate(t *testing.T) { return } require.NoError(t, err) - require.Equal(t, tt.wantResult, result) + require.Empty(t, cmp.Diff( + tt.wantResult, + result, + cmpopts.IgnoreUnexported(ValidationResult{}), + )) + if tt.wantAttrs != nil { + gotAttrs := result.JoinAttrs() + require.Empty(t, cmp.Diff( + tt.wantAttrs, + gotAttrs, + protocmp.Transform(), + )) + } }) } } @@ -440,6 +484,7 @@ func TestValidateTokenWithJWKS(t *testing.T) { claims ServiceAccountClaims wantResult *ValidationResult + wantAttrs *workloadidentityv1pb.JoinAttrsKubernetes wantErr string }{ { @@ -459,6 +504,16 @@ func TestValidateTokenWithJWKS(t *testing.T) { Type: types.KubernetesJoinTypeStaticJWKS, Username: "system:serviceaccount:default:my-service-account", }, + wantAttrs: &workloadidentityv1pb.JoinAttrsKubernetes{ + Subject: "system:serviceaccount:default:my-service-account", + Pod: 
&workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: "my-pod-797959fdf-wptbj", + }, + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Name: "my-service-account", + Namespace: "default", + }, + }, }, { name: "missing bound pod claim", @@ -607,7 +662,19 @@ func TestValidateTokenWithJWKS(t *testing.T) { return } require.NoError(t, err) - require.Equal(t, tt.wantResult, result) + require.Empty(t, cmp.Diff( + tt.wantResult, + result, + cmpopts.IgnoreUnexported(ValidationResult{}), + )) + if tt.wantAttrs != nil { + gotAttrs := result.JoinAttrs() + require.Empty(t, cmp.Diff( + tt.wantAttrs, + gotAttrs, + protocmp.Transform(), + )) + } }) } } diff --git a/lib/spacelift/spacelift.go b/lib/spacelift/spacelift.go index ddaba2f11cfd2..413620e324ae4 100644 --- a/lib/spacelift/spacelift.go +++ b/lib/spacelift/spacelift.go @@ -19,8 +19,7 @@ package spacelift import ( - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // IDTokenClaims @@ -49,20 +48,17 @@ type IDTokenClaims struct { Scope string `json:"scope"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsSpacelift { + return &workloadidentityv1pb.JoinAttrsSpacelift{ + Sub: c.Sub, + SpaceId: c.SpaceID, + CallerType: c.CallerType, + CallerId: c.CallerID, + RunType: c.RunType, + RunId: c.RunID, + Scope: c.Scope, } - - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil } diff --git a/lib/terraformcloud/terraform.go b/lib/terraformcloud/terraform.go index ded2340c2e5d1..c9db802130ae2 100644 --- a/lib/terraformcloud/terraform.go +++ b/lib/terraformcloud/terraform.go @@ -19,8 +19,7 @@ package terraformcloud import ( - "github.com/gravitational/trace" - "github.com/mitchellh/mapstructure" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // IDTokenClaims @@ -52,20 +51,17 @@ type IDTokenClaims struct { RunPhase string `json:"terraform_run_phase"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *IDTokenClaims) JoinAuditAttributes() (map[string]interface{}, error) { - res := map[string]interface{}{} - d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - TagName: "json", - Result: &res, - }) - if err != nil { - return nil, trace.Wrap(err) +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *IDTokenClaims) JoinAttrs() *workloadidentityv1pb.JoinAttrsTerraformCloud { + return &workloadidentityv1pb.JoinAttrsTerraformCloud{ + Sub: c.Sub, + OrganizationName: c.OrganizationName, + ProjectName: c.ProjectName, + WorkspaceName: c.WorkspaceName, + FullWorkspace: c.FullWorkspace, + RunId: c.RunID, + RunPhase: c.RunPhase, } - - if err := d.Decode(c); err != nil { - return nil, trace.Wrap(err) - } - return res, nil } diff --git a/lib/tlsca/ca.go b/lib/tlsca/ca.go index 3edde794e5860..a7e6ad24e39e4 100644 --- a/lib/tlsca/ca.go +++ b/lib/tlsca/ca.go @@ -36,8 +36,10 @@ import ( "github.com/gravitational/trace" "github.com/jonboulle/clockwork" + "google.golang.org/protobuf/encoding/protojson" "github.com/gravitational/teleport" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/events" "github.com/gravitational/teleport/api/types/wrappers" @@ -203,6 +205,10 @@ type Identity struct { // UserType indicates if the User was created by an SSO Provider or locally. UserType types.UserType + + // JoinAttributes holds the attributes that resulted from the + // Bot/Agent join process. + JoinAttributes *workloadidentityv1pb.JoinAttrs } // RouteToApp holds routing information for applications. @@ -556,6 +562,10 @@ var ( // BotInstanceASN1ExtensionOID is an extension that encodes a unique bot // instance identifier into a certificate. BotInstanceASN1ExtensionOID = asn1.ObjectIdentifier{1, 3, 9999, 2, 20} + + // JoinAttributesASN1ExtensionOID is an extension that encodes the + // attributes that resulted from the Bot/Agent join process. + JoinAttributesASN1ExtensionOID = asn1.ObjectIdentifier{1, 3, 9999, 2, 21} ) // Device Trust OIDs. 
@@ -895,6 +905,24 @@ func (id *Identity) Subject() (pkix.Name, error) { ) } + if id.JoinAttributes != nil { + encoded, err := protojson.MarshalOptions{ + // Use the proto field names as this is what we use in the + // templating engine and this being consistent for any user who + // inspects the cert is kind. + UseProtoNames: true, + }.Marshal(id.JoinAttributes) + if err != nil { + return pkix.Name{}, trace.Wrap(err, "encoding join attributes as protojson") + } + subject.ExtraNames = append(subject.ExtraNames, + pkix.AttributeTypeAndValue{ + Type: JoinAttributesASN1ExtensionOID, + Value: string(encoded), + }, + ) + } + // Device extensions. if devID := id.DeviceExtensions.DeviceID; devID != "" { subject.ExtraNames = append(subject.ExtraNames, pkix.AttributeTypeAndValue{ @@ -1158,6 +1186,19 @@ func FromSubject(subject pkix.Name, expires time.Time) (*Identity, error) { if val, ok := attr.Value.(string); ok { id.UserType = types.UserType(val) } + case attr.Type.Equal(JoinAttributesASN1ExtensionOID): + if val, ok := attr.Value.(string); ok { + id.JoinAttributes = &workloadidentityv1pb.JoinAttrs{} + unmarshaler := protojson.UnmarshalOptions{ + // We specifically want to DiscardUnknown or unmarshaling + // will fail if the proto message was issued by a newer + // auth server w/ new fields. 
+ DiscardUnknown: true, + } + if err := unmarshaler.Unmarshal([]byte(val), id.JoinAttributes); err != nil { + return nil, trace.Wrap(err) + } + } } } diff --git a/lib/tlsca/ca_test.go b/lib/tlsca/ca_test.go index 022facef5d0cf..50295f1e7bcf9 100644 --- a/lib/tlsca/ca_test.go +++ b/lib/tlsca/ca_test.go @@ -34,8 +34,10 @@ import ( "github.com/jonboulle/clockwork" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" "github.com/gravitational/teleport" + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" apievents "github.com/gravitational/teleport/api/types/events" "github.com/gravitational/teleport/api/utils/keys" "github.com/gravitational/teleport/lib/cryptosuites" @@ -154,6 +156,58 @@ func TestRenewableIdentity(t *testing.T) { require.True(t, parsed.Renewable) } +func TestJoinAttributes(t *testing.T) { + t.Parallel() + + clock := clockwork.NewFakeClock() + expires := clock.Now().Add(1 * time.Hour) + + ca, err := FromKeys([]byte(fixtures.TLSCACertPEM), []byte(fixtures.TLSCAKeyPEM)) + require.NoError(t, err) + + privateKey, err := cryptosuites.GenerateKeyWithAlgorithm(cryptosuites.ECDSAP256) + require.NoError(t, err) + + identity := Identity{ + Username: "bot-bernard", + Groups: []string{"bot-bernard"}, + BotName: "bernard", + BotInstanceID: "1234-5678", + Expires: expires, + JoinAttributes: &workloadidentityv1pb.JoinAttrs{ + Kubernetes: &workloadidentityv1pb.JoinAttrsKubernetes{ + ServiceAccount: &workloadidentityv1pb.JoinAttrsKubernetesServiceAccount{ + Namespace: "default", + Name: "foo", + }, + Pod: &workloadidentityv1pb.JoinAttrsKubernetesPod{ + Name: "bar", + }, + }, + }, + } + + subj, err := identity.Subject() + require.NoError(t, err) + require.NotNil(t, subj) + + certBytes, err := ca.GenerateCertificate(CertificateRequest{ + Clock: clock, + PublicKey: privateKey.Public(), + Subject: subj, + NotAfter: expires, + }) + require.NoError(t, 
err) + + cert, err := ParseCertificatePEM(certBytes) + require.NoError(t, err) + + parsed, err := FromSubject(cert.Subject, expires) + require.NoError(t, err) + require.NotNil(t, parsed) + require.Empty(t, cmp.Diff(parsed, &identity, protocmp.Transform())) +} + // TestKubeExtensions test ASN1 subject kubernetes extensions func TestKubeExtensions(t *testing.T) { clock := clockwork.NewFakeClock() diff --git a/lib/tpm/validate.go b/lib/tpm/validate.go index 268857d35e4ff..126133d31e644 100644 --- a/lib/tpm/validate.go +++ b/lib/tpm/validate.go @@ -27,6 +27,8 @@ import ( "github.com/google/go-attestation/attest" "github.com/gravitational/trace" + + workloadidentityv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/workloadidentity/v1" ) // ValidateParams are the parameters required to validate a TPM. @@ -63,14 +65,17 @@ type ValidatedTPM struct { EKCertVerified bool `json:"ek_cert_verified"` } -// JoinAuditAttributes returns a series of attributes that can be inserted into -// audit events related to a specific join. -func (c *ValidatedTPM) JoinAuditAttributes() (map[string]interface{}, error) { - return map[string]interface{}{ - "ek_pub_hash": c.EKPubHash, - "ek_cert_serial": c.EKCertSerial, - "ek_cert_verified": c.EKCertVerified, - }, nil +// JoinAttrs returns the protobuf representation of the attested identity. +// This is used for auditing and for evaluation of WorkloadIdentity rules and +// templating. 
+func (c *ValidatedTPM) JoinAttrs() *workloadidentityv1pb.JoinAttrsTPM { + attrs := &workloadidentityv1pb.JoinAttrsTPM{ + EkPubHash: c.EKPubHash, + EkCertSerial: c.EKCertSerial, + EkCertVerified: c.EKCertVerified, + } + + return attrs } // Validate takes the parameters from a remote TPM and performs the necessary diff --git a/tool/tctl/common/bots_command.go b/tool/tctl/common/bots_command.go index 1cd290cb1bcd2..fa8ffbf7861cd 100644 --- a/tool/tctl/common/bots_command.go +++ b/tool/tctl/common/bots_command.go @@ -588,7 +588,10 @@ func (c *BotsCommand) ListBotInstances(ctx context.Context, client *authclient.C ) joined := i.Status.InitialAuthentication.AuthenticatedAt.AsTime().Format(time.RFC3339) - initialJoinMethod := i.Status.InitialAuthentication.JoinMethod + initialJoinMethod := cmp.Or( + i.Status.InitialAuthentication.GetJoinAttrs().GetMeta().GetJoinMethod(), + i.Status.InitialAuthentication.JoinMethod, + ) lastSeen := i.Status.InitialAuthentication.AuthenticatedAt.AsTime() @@ -599,8 +602,12 @@ func (c *BotsCommand) ListBotInstances(ctx context.Context, client *authclient.C generation = fmt.Sprint(auth.Generation) - if auth.JoinMethod == initialJoinMethod { - joinMethod = auth.JoinMethod + authJM := cmp.Or( + auth.GetJoinAttrs().GetMeta().GetJoinMethod(), + auth.JoinMethod, + ) + if authJM == initialJoinMethod { + joinMethod = authJM } else { // If the join method changed, show the original method and latest joinMethod = fmt.Sprintf("%s (%s)", auth.JoinMethod, initialJoinMethod) @@ -844,9 +851,13 @@ func splitEntries(flag string) []string { func formatBotInstanceAuthentication(record *machineidv1pb.BotInstanceStatusAuthentication) string { table := asciitable.MakeHeadlessTable(2) table.AddRow([]string{"Authenticated At:", record.AuthenticatedAt.AsTime().Format(time.RFC3339)}) - table.AddRow([]string{"Join Method:", record.JoinMethod}) - table.AddRow([]string{"Join Token:", record.JoinToken}) - table.AddRow([]string{"Join Metadata:", record.Metadata.String()}) 
+ table.AddRow([]string{"Join Method:", cmp.Or(record.GetJoinAttrs().GetMeta().GetJoinMethod(), record.JoinMethod)}) + table.AddRow([]string{"Join Token:", cmp.Or(record.GetJoinAttrs().GetMeta().GetJoinTokenName(), record.JoinToken)}) + var meta fmt.Stringer = record.Metadata + if attrs := record.GetJoinAttrs(); attrs != nil { + meta = attrs + } + table.AddRow([]string{"Join Metadata:", meta.String()}) table.AddRow([]string{"Generation:", fmt.Sprint(record.Generation)}) table.AddRow([]string{"Public Key:", fmt.Sprintf("<%d bytes>", len(record.PublicKey))}) From c07a3502a80012b9749a8011d3e35bdd67e56199 Mon Sep 17 00:00:00 2001 From: Tiago Silva Date: Wed, 8 Jan 2025 10:20:53 +0000 Subject: [PATCH 02/45] [chore] replace gitlab client with `gitlab.com/gitlab-org/api/client-go` (#50856) Gitlab moved the golang client under its organization and deprecated `github.com/xanzy/go-gitlab` in favour of `gitlab.com/gitlab-org/api/client-go`. The codebase is the same and it's a pure fork of the original client but it's now controlled by Gitlab. 
Signed-off-by: Tiago Silva --- e_imports.go | 1 + go.mod | 1 + go.sum | 2 ++ integrations/event-handler/go.mod | 1 + integrations/event-handler/go.sum | 2 ++ integrations/terraform/go.mod | 1 + integrations/terraform/go.sum | 2 ++ 7 files changed, 10 insertions(+) diff --git a/e_imports.go b/e_imports.go index 32eb84c7ff814..4e7d22b5563d5 100644 --- a/e_imports.go +++ b/e_imports.go @@ -114,6 +114,7 @@ import ( _ "github.com/stretchr/testify/require" _ "github.com/vulcand/predicate/builder" _ "github.com/xanzy/go-gitlab" + _ "gitlab.com/gitlab-org/api/client-go" _ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" _ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/filters" _ "golang.org/x/crypto/bcrypt" diff --git a/go.mod b/go.mod index 37dbf691b118c..7ad47d17948f9 100644 --- a/go.mod +++ b/go.mod @@ -187,6 +187,7 @@ require ( github.com/vulcand/predicate v1.2.0 // replaced github.com/xanzy/go-gitlab v0.115.0 github.com/yusufpapurcu/wmi v1.2.4 + gitlab.com/gitlab-org/api/client-go v0.119.0 go.etcd.io/etcd/api/v3 v3.5.17 go.etcd.io/etcd/client/v3 v3.5.17 go.mongodb.org/mongo-driver v1.14.0 diff --git a/go.sum b/go.sum index eab1890d48d9a..8e7918257ef4e 100644 --- a/go.sum +++ b/go.sum @@ -2273,6 +2273,8 @@ github.com/zmap/zcrypto v0.0.0-20231219022726-a1f61fb1661c/go.mod h1:GSDpFDD4TAS github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8= github.com/zmap/zlint/v3 v3.6.0 h1:vTEaDRtYN0d/1Ax60T+ypvbLQUHwHxbvYRnUMVr35ug= github.com/zmap/zlint/v3 v3.6.0/go.mod h1:NVgiIWssgzp0bNl8P4Gz94NHV2ep/4Jyj9V69uTmZyg= +gitlab.com/gitlab-org/api/client-go v0.119.0 h1:YBZyx9XUTtEDBBYtY36cZWz6JmT7om/8HPSk37IS95g= +gitlab.com/gitlab-org/api/client-go v0.119.0/go.mod h1:ygHmS3AU3TpvK+AC6DYO1QuAxLlv6yxYK+/Votr/WFQ= go.einride.tech/aip v0.68.0 h1:4seM66oLzTpz50u4K1zlJyOXQ3tCzcJN7I22tKkjipw= go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHHg= go.etcd.io/bbolt 
v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod index 6e15b984279de..ac08f428d877b 100644 --- a/integrations/event-handler/go.mod +++ b/integrations/event-handler/go.mod @@ -270,6 +270,7 @@ require ( github.com/zeebo/errs v1.3.0 // indirect github.com/zmap/zcrypto v0.0.0-20231219022726-a1f61fb1661c // indirect github.com/zmap/zlint/v3 v3.6.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.119.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect diff --git a/integrations/event-handler/go.sum b/integrations/event-handler/go.sum index 0a7a3154df3d9..e8187beede135 100644 --- a/integrations/event-handler/go.sum +++ b/integrations/event-handler/go.sum @@ -1606,6 +1606,8 @@ github.com/zmap/zcrypto v0.0.0-20231219022726-a1f61fb1661c/go.mod h1:GSDpFDD4TAS github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8= github.com/zmap/zlint/v3 v3.6.0 h1:vTEaDRtYN0d/1Ax60T+ypvbLQUHwHxbvYRnUMVr35ug= github.com/zmap/zlint/v3 v3.6.0/go.mod h1:NVgiIWssgzp0bNl8P4Gz94NHV2ep/4Jyj9V69uTmZyg= +gitlab.com/gitlab-org/api/client-go v0.119.0 h1:YBZyx9XUTtEDBBYtY36cZWz6JmT7om/8HPSk37IS95g= +gitlab.com/gitlab-org/api/client-go v0.119.0/go.mod h1:ygHmS3AU3TpvK+AC6DYO1QuAxLlv6yxYK+/Votr/WFQ= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index 1130d2f21418f..6af471fc05d67 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -334,6 +334,7 @@ require ( github.com/zeebo/errs v1.3.0 // indirect github.com/zmap/zcrypto 
v0.0.0-20231219022726-a1f61fb1661c // indirect github.com/zmap/zlint/v3 v3.6.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.119.0 // indirect go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect diff --git a/integrations/terraform/go.sum b/integrations/terraform/go.sum index bb7db860bee43..71ace0ea97a94 100644 --- a/integrations/terraform/go.sum +++ b/integrations/terraform/go.sum @@ -1932,6 +1932,8 @@ github.com/zmap/zcrypto v0.0.0-20231219022726-a1f61fb1661c/go.mod h1:GSDpFDD4TAS github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8= github.com/zmap/zlint/v3 v3.6.0 h1:vTEaDRtYN0d/1Ax60T+ypvbLQUHwHxbvYRnUMVr35ug= github.com/zmap/zlint/v3 v3.6.0/go.mod h1:NVgiIWssgzp0bNl8P4Gz94NHV2ep/4Jyj9V69uTmZyg= +gitlab.com/gitlab-org/api/client-go v0.119.0 h1:YBZyx9XUTtEDBBYtY36cZWz6JmT7om/8HPSk37IS95g= +gitlab.com/gitlab-org/api/client-go v0.119.0/go.mod h1:ygHmS3AU3TpvK+AC6DYO1QuAxLlv6yxYK+/Votr/WFQ= go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w= From a4c9470899aa2166da87f42b5c74e0c720e0a4f5 Mon Sep 17 00:00:00 2001 From: Taras <9948629+taraspos@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:59:37 +0000 Subject: [PATCH 03/45] gha(update-docs-webhook): fix workflow syntax (#50778) * gha(update-docs-webhook): fix workflow syntax * Use `secret.NAME` syntax --- .github/workflows/update-docs-webhook.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update-docs-webhook.yaml b/.github/workflows/update-docs-webhook.yaml index 4c59c66039cbd..f1ade331c4276 100644 --- a/.github/workflows/update-docs-webhook.yaml +++ b/.github/workflows/update-docs-webhook.yaml @@ -18,7 +18,7 @@ jobs: steps: - name: 
Call deployment webhook env: - WEBHOOK_URL: ${{ secrets[AMPLIFY_DOCS_DEPLOY_HOOK] }} + WEBHOOK_URL: ${{ secrets.AMPLIFY_DOCS_DEPLOY_HOOK }} run: | if curl -X POST --silent --fail --show-error "$WEBHOOK_URL" > /dev/null; then echo "Triggered successfully" From e1dfb942d34fd33d685addaab24dc8ad7a4e974f Mon Sep 17 00:00:00 2001 From: Tiago Silva Date: Wed, 8 Jan 2025 11:44:22 +0000 Subject: [PATCH 04/45] [chore] remove `github.com/xanzy/go-gitlab` from direct depenencies (#50857) After deprecation of `github.com/xanzy/go-gitlab` in favour of `gitlab.com/gitlab-org/api/client-go`, this PR drops the direct dependency from Teleport module. The dependency is still indirect because it's imported by `cosign`. Hopefully in future versions, the dependency will be updated and fully removed. Signed-off-by: Tiago Silva --- e | 2 +- e_imports.go | 1 - go.mod | 2 +- integrations/event-handler/go.mod | 1 - integrations/event-handler/go.sum | 2 -- integrations/terraform/go.mod | 1 - integrations/terraform/go.sum | 2 -- 7 files changed, 2 insertions(+), 9 deletions(-) diff --git a/e b/e index f00dbc995fee6..1bc4a6909732d 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit f00dbc995fee6e9159442c62300026e32759e86a +Subproject commit 1bc4a6909732d3b8b98b19fab56d9a39f228ec01 diff --git a/e_imports.go b/e_imports.go index 4e7d22b5563d5..245bb8bd253d8 100644 --- a/e_imports.go +++ b/e_imports.go @@ -113,7 +113,6 @@ import ( _ "github.com/stretchr/testify/mock" _ "github.com/stretchr/testify/require" _ "github.com/vulcand/predicate/builder" - _ "github.com/xanzy/go-gitlab" _ "gitlab.com/gitlab-org/api/client-go" _ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" _ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/filters" diff --git a/go.mod b/go.mod index 7ad47d17948f9..7e6e10cd7d882 100644 --- a/go.mod +++ b/go.mod @@ -185,7 +185,6 @@ require ( github.com/stretchr/testify v1.10.0 github.com/ucarion/urlpath 
v0.0.0-20200424170820-7ccc79b76bbb github.com/vulcand/predicate v1.2.0 // replaced - github.com/xanzy/go-gitlab v0.115.0 github.com/yusufpapurcu/wmi v1.2.4 gitlab.com/gitlab-org/api/client-go v0.119.0 go.etcd.io/etcd/api/v3 v3.5.17 @@ -514,6 +513,7 @@ require ( github.com/vbatts/tar-split v0.11.5 // indirect github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xanzy/go-gitlab v0.115.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod index ac08f428d877b..b7d0be9eb4684 100644 --- a/integrations/event-handler/go.mod +++ b/integrations/event-handler/go.mod @@ -261,7 +261,6 @@ require ( github.com/vulcand/predicate v1.2.0 // indirect github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xanzy/go-gitlab v0.115.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect diff --git a/integrations/event-handler/go.sum b/integrations/event-handler/go.sum index e8187beede135..e947423db8254 100644 --- a/integrations/event-handler/go.sum +++ b/integrations/event-handler/go.sum @@ -1557,8 +1557,6 @@ github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d h1:q80YK github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d/go.mod h1:vLdXKydr/OJssAXmjY0XBgLXUfivBMrNRIBljgtqCnw= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xanzy/go-gitlab v0.115.0 h1:6DmtItNcVe+At/liXSgfE/DZNZrGfalQmBRmOcJjOn8= 
-github.com/xanzy/go-gitlab v0.115.0/go.mod h1:5XCDtM7AM6WMKmfDdOiEpyRWUqui2iS9ILfvCZ2gJ5M= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index 6af471fc05d67..81610ba23b74f 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -321,7 +321,6 @@ require ( github.com/vulcand/predicate v1.2.0 // indirect github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xanzy/go-gitlab v0.115.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect diff --git a/integrations/terraform/go.sum b/integrations/terraform/go.sum index 71ace0ea97a94..f996d8217a817 100644 --- a/integrations/terraform/go.sum +++ b/integrations/terraform/go.sum @@ -1866,8 +1866,6 @@ github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d h1:q80YK github.com/weppos/publicsuffix-go v0.30.3-0.20240510084413-5f1d03393b3d/go.mod h1:vLdXKydr/OJssAXmjY0XBgLXUfivBMrNRIBljgtqCnw= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xanzy/go-gitlab v0.115.0 h1:6DmtItNcVe+At/liXSgfE/DZNZrGfalQmBRmOcJjOn8= -github.com/xanzy/go-gitlab v0.115.0/go.mod h1:5XCDtM7AM6WMKmfDdOiEpyRWUqui2iS9ILfvCZ2gJ5M= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod 
h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= From 26a0ac82228304dc4cd86538ac5048789319975b Mon Sep 17 00:00:00 2001 From: Bartosz Leper Date: Wed, 8 Jan 2025 14:33:06 +0100 Subject: [PATCH 05/45] Don't downgrade SSH port forwarding in roles for v18.0+ (#50645) --- lib/auth/grpcserver.go | 15 ++++++--------- lib/auth/grpcserver_test.go | 6 +++--- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/lib/auth/grpcserver.go b/lib/auth/grpcserver.go index d5dfc1f553f2b..4b1cdaef21422 100644 --- a/lib/auth/grpcserver.go +++ b/lib/auth/grpcserver.go @@ -2019,9 +2019,7 @@ func maybeDowngradeRole(ctx context.Context, role *types.RoleV6) (*types.RoleV6, return role, nil } -var minSupportedSSHPortForwardingVersions = map[int64]semver.Version{ - 17: {Major: 17, Minor: 1, Patch: 0}, -} +var minSupportedSSHPortForwardingVersion = semver.Version{Major: 17, Minor: 1, Patch: 0} func maybeDowngradeRoleSSHPortForwarding(role *types.RoleV6, clientVersion *semver.Version) *types.RoleV6 { sshPortForwarding := role.GetOptions().SSHPortForwarding @@ -2029,11 +2027,10 @@ func maybeDowngradeRoleSSHPortForwarding(role *types.RoleV6, clientVersion *semv return role } - minSupportedVersion, ok := minSupportedSSHPortForwardingVersions[clientVersion.Major] - if ok { - if supported, err := utils.MinVerWithoutPreRelease(clientVersion.String(), minSupportedVersion.String()); supported || err != nil { - return role - } + if supported, err := utils.MinVerWithoutPreRelease( + clientVersion.String(), + minSupportedSSHPortForwardingVersion.String()); supported || err != nil { + return role } role = apiutils.CloneProtoMsg(role) @@ -2044,7 +2041,7 @@ func maybeDowngradeRoleSSHPortForwarding(role *types.RoleV6, clientVersion *semv role.SetOptions(options) reason := fmt.Sprintf(`Client version %q does not support granular SSH port forwarding. Role %q will be downgraded `+ `to simple port forwarding rules instead. 
In order to support granular SSH port forwarding, all clients must be `+ - `updated to version %q or higher.`, clientVersion, role.GetName(), minSupportedVersion) + `updated to version %q or higher.`, clientVersion, role.GetName(), minSupportedSSHPortForwardingVersion) if role.Metadata.Labels == nil { role.Metadata.Labels = make(map[string]string, 1) } diff --git a/lib/auth/grpcserver_test.go b/lib/auth/grpcserver_test.go index c92e521e386c0..60ed4193c30ae 100644 --- a/lib/auth/grpcserver_test.go +++ b/lib/auth/grpcserver_test.go @@ -4741,7 +4741,7 @@ func TestRoleVersions(t *testing.T) { { desc: "up to date - enabled", clientVersions: []string{ - "17.1.0", "17.1.0-dev", "", + "17.1.0", "17.1.0-dev", "18.0.0-dev", "19.0.0", "", }, inputRole: enabledRole, expectedRole: enabledRole, @@ -4749,7 +4749,7 @@ func TestRoleVersions(t *testing.T) { { desc: "up to date - disabled", clientVersions: []string{ - "17.1.0", "17.1.0-dev", "", + "17.1.0", "17.1.0-dev", "18.0.0-dev", "19.0.0", "", }, inputRole: disabledRole, expectedRole: disabledRole, @@ -4757,7 +4757,7 @@ func TestRoleVersions(t *testing.T) { { desc: "up to date - undefined", clientVersions: []string{ - "17.1.0", "17.1.0-dev", "", + "17.1.0", "17.1.0-dev", "18.0.0-dev", "19.0.0", "", }, inputRole: undefinedRole, expectedRole: undefinedRole, From b90c1f6e24f4678ad9bdbd88c6eced061ca8f204 Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Wed, 8 Jan 2025 08:43:50 -0500 Subject: [PATCH 06/45] Fix spelling for log and help (#50860) --- lib/auth/auth_with_roles.go | 4 ++-- lib/config/configuration.go | 2 +- lib/events/athena/athena.go | 2 +- lib/reversetunnel/transport.go | 4 ++-- lib/srv/db/cloud/meta.go | 2 +- lib/utils/host/hostusers.go | 2 +- lib/utils/unpack.go | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index fe50d3d0af68d..850f99f71bc12 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -2962,14 +2962,14 
@@ func (a *ServerWithRoles) GetCurrentUserRoles(ctx context.Context) ([]types.Role func (a *ServerWithRoles) desiredAccessInfo(ctx context.Context, req *proto.UserCertsRequest, user types.User) (*services.AccessInfo, error) { if req.Username != a.context.User.GetName() { if isRoleImpersonation(*req) { - a.authServer.logger.WarnContext(ctx, "User tried to issue a cert for another user wjile adding role requests", + a.authServer.logger.WarnContext(ctx, "User tried to issue a cert for another user while adding role requests", "user", a.context.User.GetName(), "requested_user", req.Username, ) return nil, trace.AccessDenied("User %v tried to issue a cert for %v and added role requests. This is not supported.", a.context.User.GetName(), req.Username) } if len(req.AccessRequests) > 0 { - a.authServer.logger.WarnContext(ctx, "User tried to issue a cert for another user wihile adding access requests", + a.authServer.logger.WarnContext(ctx, "User tried to issue a cert for another user while adding access requests", "user", a.context.User.GetName(), "requested_user", req.Username, ) diff --git a/lib/config/configuration.go b/lib/config/configuration.go index 5955da7b93daa..546a48700d4fe 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -1920,7 +1920,7 @@ func readCACert(database *Database) ([]byte, error) { if database.CACertFile != "" { if database.TLS.CACertFile != "" { // New and old fields are set. Ignore the old field. - slog.WarnContext(context.Background(), "Ignoring deprecated ca_cert_file database in configuration; using tls.ca_cert_file", "dababase", database.Name) + slog.WarnContext(context.Background(), "Ignoring deprecated ca_cert_file database in configuration; using tls.ca_cert_file", "database", database.Name) } else { // Only old field is set, inform about deprecation. 
slog.WarnContext(context.Background(), "ca_cert_file is deprecated, please use tls.ca_cert_file instead for databases", "database", database.Name) diff --git a/lib/events/athena/athena.go b/lib/events/athena/athena.go index 601cfba96abc6..6d07b340ad4eb 100644 --- a/lib/events/athena/athena.go +++ b/lib/events/athena/athena.go @@ -617,7 +617,7 @@ func newAthenaMetrics(cfg athenaMetricsConfig) (*athenaMetrics, error) { prometheus.HistogramOpts{ Namespace: teleport.MetricNamespace, Name: teleport.MetricParquetlogConsumerDeleteEventsDuration, - Help: "Duration of delation of events on SQS in parquetlog", + Help: "Duration of deletion of events on SQS in parquetlog", // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 // highest bucket start of 0.001 sec * 2^15 == 32.768 sec Buckets: prometheus.ExponentialBuckets(0.001, 2, 16), diff --git a/lib/reversetunnel/transport.go b/lib/reversetunnel/transport.go index b3d338e9bb62b..eb4b60c9b6f21 100644 --- a/lib/reversetunnel/transport.go +++ b/lib/reversetunnel/transport.go @@ -160,7 +160,7 @@ func (p *transport) start() { p.logger.DebugContext(p.closeContext, "Received out-of-band proxy transport request", "target_address", dreq.Address, - "taget_server_id", dreq.ServerID, + "target_server_id", dreq.ServerID, "client_addr", dreq.ClientSrcAddr, ) @@ -414,7 +414,7 @@ func (p *transport) getConn(addr string, r *sshutils.DialReq) (net.Conn, bool, e } errTun := err - p.logger.DebugContext(p.closeContext, "Attempting to dial server directly", "taget_addr", addr) + p.logger.DebugContext(p.closeContext, "Attempting to dial server directly", "target_addr", addr) conn, err = p.directDial(addr) if err != nil { return nil, false, trace.ConnectionProblem(err, "failed dialing through tunnel (%v) or directly (%v)", errTun, err) diff --git a/lib/srv/db/cloud/meta.go b/lib/srv/db/cloud/meta.go index 031f9fb9dae4c..98e2280fb1db5 100644 --- a/lib/srv/db/cloud/meta.go +++ b/lib/srv/db/cloud/meta.go @@ -137,7 +137,7 @@ func (m 
*Metadata) updateAWS(ctx context.Context, database types.Database, fetch return trace.Wrap(err) } - m.logger.DebugContext(ctx, "Fetched metadata for dabase", "database", database, "metadata", logutils.StringerAttr(fetchedMeta)) + m.logger.DebugContext(ctx, "Fetched metadata for database", "database", database, "metadata", logutils.StringerAttr(fetchedMeta)) fetchedMeta.AssumeRoleARN = meta.AssumeRoleARN fetchedMeta.ExternalID = meta.ExternalID database.SetStatusAWS(*fetchedMeta) diff --git a/lib/utils/host/hostusers.go b/lib/utils/host/hostusers.go index de3ce20b5d69d..968cf82afc1ae 100644 --- a/lib/utils/host/hostusers.go +++ b/lib/utils/host/hostusers.go @@ -180,7 +180,7 @@ func UserDel(username string) (exitCode int, err error) { // userdel --remove (remove home) username cmd := exec.Command(userdelBin, args...) output, err := cmd.CombinedOutput() - slog.DebugContext(context.Background(), "usedel command completed", + slog.DebugContext(context.Background(), "userdel command completed", "command_path", cmd.Path, "output", string(output), ) diff --git a/lib/utils/unpack.go b/lib/utils/unpack.go index e42d38283eba2..35f76adbb452b 100644 --- a/lib/utils/unpack.go +++ b/lib/utils/unpack.go @@ -141,7 +141,7 @@ func extractFile(tarball *tar.Reader, header *tar.Header, dir string, dirMode os case tar.TypeSymlink: return writeSymbolicLink(filepath.Join(dir, header.Name), header.Linkname, dirMode) default: - slog.WarnContext(context.Background(), "Unsupported type flag for taball", + slog.WarnContext(context.Background(), "Unsupported type flag for tarball", "type_flag", header.Typeflag, "header", header.Name, ) From e5966d8f0aa174e96f74b5241cbc9f7697afb16d Mon Sep 17 00:00:00 2001 From: Alan Parra Date: Wed, 8 Jan 2025 10:50:45 -0300 Subject: [PATCH 07/45] chore: Bump google.golang.org/protobuf to v1.36.2 (#50841) * chore: Bump google.golang.org/protobuf to v1.36.2 * Tidy modules * Update generated protos --- api/client/proto/event.pb.go | 2 +- 
api/gen/proto/go/teleport/accessgraph/v1/authorized_key.pb.go | 2 +- api/gen/proto/go/teleport/accessgraph/v1/private_key.pb.go | 2 +- .../proto/go/teleport/accessgraph/v1/secrets_service.pb.go | 2 +- api/gen/proto/go/teleport/accesslist/v1/accesslist.pb.go | 2 +- .../proto/go/teleport/accesslist/v1/accesslist_service.pb.go | 2 +- .../accessmonitoringrules/v1/access_monitoring_rules.pb.go | 2 +- .../v1/access_monitoring_rules_service.pb.go | 2 +- api/gen/proto/go/teleport/auditlog/v1/auditlog.pb.go | 2 +- api/gen/proto/go/teleport/autoupdate/v1/autoupdate.pb.go | 2 +- .../proto/go/teleport/autoupdate/v1/autoupdate_service.pb.go | 2 +- api/gen/proto/go/teleport/clusterconfig/v1/access_graph.pb.go | 2 +- .../go/teleport/clusterconfig/v1/access_graph_settings.pb.go | 2 +- .../go/teleport/clusterconfig/v1/clusterconfig_service.pb.go | 2 +- api/gen/proto/go/teleport/crownjewel/v1/crownjewel.pb.go | 2 +- .../proto/go/teleport/crownjewel/v1/crownjewel_service.pb.go | 2 +- api/gen/proto/go/teleport/dbobject/v1/dbobject.pb.go | 2 +- api/gen/proto/go/teleport/dbobject/v1/dbobject_service.pb.go | 2 +- .../teleport/dbobjectimportrule/v1/dbobjectimportrule.pb.go | 2 +- .../dbobjectimportrule/v1/dbobjectimportrule_service.pb.go | 2 +- .../proto/go/teleport/decision/v1alpha1/database_access.pb.go | 2 +- .../go/teleport/decision/v1alpha1/decision_service.pb.go | 2 +- .../proto/go/teleport/decision/v1alpha1/denial_metadata.pb.go | 2 +- .../go/teleport/decision/v1alpha1/enforcement_feature.pb.go | 2 +- .../proto/go/teleport/decision/v1alpha1/permit_metadata.pb.go | 2 +- .../go/teleport/decision/v1alpha1/request_metadata.pb.go | 2 +- api/gen/proto/go/teleport/decision/v1alpha1/resource.pb.go | 2 +- api/gen/proto/go/teleport/decision/v1alpha1/ssh_access.pb.go | 2 +- .../proto/go/teleport/decision/v1alpha1/ssh_identity.pb.go | 2 +- .../proto/go/teleport/decision/v1alpha1/tls_identity.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/assert.pb.go | 2 +- 
.../go/teleport/devicetrust/v1/authenticate_challenge.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/device.pb.go | 2 +- .../go/teleport/devicetrust/v1/device_collected_data.pb.go | 2 +- .../teleport/devicetrust/v1/device_confirmation_token.pb.go | 2 +- .../go/teleport/devicetrust/v1/device_enroll_token.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/device_profile.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/device_source.pb.go | 2 +- .../proto/go/teleport/devicetrust/v1/device_web_token.pb.go | 2 +- .../go/teleport/devicetrust/v1/devicetrust_service.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/os_type.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/tpm.pb.go | 2 +- api/gen/proto/go/teleport/devicetrust/v1/usage.pb.go | 2 +- .../proto/go/teleport/devicetrust/v1/user_certificates.pb.go | 2 +- .../go/teleport/discoveryconfig/v1/discoveryconfig.pb.go | 2 +- .../teleport/discoveryconfig/v1/discoveryconfig_service.pb.go | 2 +- .../teleport/dynamicwindows/v1/dynamicwindows_service.pb.go | 2 +- api/gen/proto/go/teleport/embedding/v1/embedding.pb.go | 2 +- .../externalauditstorage/v1/externalauditstorage.pb.go | 2 +- .../v1/externalauditstorage_service.pb.go | 2 +- .../proto/go/teleport/gitserver/v1/git_server_service.pb.go | 2 +- api/gen/proto/go/teleport/header/v1/metadata.pb.go | 2 +- api/gen/proto/go/teleport/header/v1/resourceheader.pb.go | 2 +- .../proto/go/teleport/identitycenter/v1/identitycenter.pb.go | 2 +- .../teleport/identitycenter/v1/identitycenter_service.pb.go | 2 +- .../proto/go/teleport/integration/v1/awsoidc_service.pb.go | 2 +- .../go/teleport/integration/v1/integration_service.pb.go | 2 +- api/gen/proto/go/teleport/kube/v1/kube_service.pb.go | 2 +- .../kubewaitingcontainer/v1/kubewaitingcontainer.pb.go | 2 +- .../v1/kubewaitingcontainer_service.pb.go | 2 +- api/gen/proto/go/teleport/label/v1/label.pb.go | 2 +- api/gen/proto/go/teleport/loginrule/v1/loginrule.pb.go | 2 +- 
.../proto/go/teleport/loginrule/v1/loginrule_service.pb.go | 2 +- api/gen/proto/go/teleport/machineid/v1/bot.pb.go | 2 +- api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go | 2 +- .../proto/go/teleport/machineid/v1/bot_instance_service.pb.go | 2 +- api/gen/proto/go/teleport/machineid/v1/bot_service.pb.go | 2 +- api/gen/proto/go/teleport/machineid/v1/federation.pb.go | 2 +- .../proto/go/teleport/machineid/v1/federation_service.pb.go | 2 +- .../go/teleport/machineid/v1/workload_identity_service.pb.go | 2 +- .../proto/go/teleport/notifications/v1/notifications.pb.go | 2 +- .../go/teleport/notifications/v1/notifications_service.pb.go | 2 +- api/gen/proto/go/teleport/okta/v1/okta_service.pb.go | 2 +- api/gen/proto/go/teleport/plugins/v1/plugin_service.pb.go | 2 +- api/gen/proto/go/teleport/presence/v1/service.pb.go | 2 +- api/gen/proto/go/teleport/provisioning/v1/provisioning.pb.go | 2 +- .../go/teleport/provisioning/v1/provisioning_service.pb.go | 2 +- .../proto/go/teleport/resourceusage/v1/access_requests.pb.go | 2 +- .../go/teleport/resourceusage/v1/account_usage_type.pb.go | 2 +- api/gen/proto/go/teleport/resourceusage/v1/device_trust.pb.go | 2 +- .../go/teleport/resourceusage/v1/resourceusage_service.pb.go | 2 +- api/gen/proto/go/teleport/samlidp/v1/samlidp.pb.go | 2 +- api/gen/proto/go/teleport/scim/v1/scim_service.pb.go | 2 +- api/gen/proto/go/teleport/secreports/v1/secreports.pb.go | 2 +- .../proto/go/teleport/secreports/v1/secreports_service.pb.go | 2 +- api/gen/proto/go/teleport/trait/v1/trait.pb.go | 2 +- .../proto/go/teleport/transport/v1/transport_service.pb.go | 2 +- api/gen/proto/go/teleport/trust/v1/trust_service.pb.go | 2 +- .../proto/go/teleport/userloginstate/v1/userloginstate.pb.go | 2 +- .../teleport/userloginstate/v1/userloginstate_service.pb.go | 2 +- .../go/teleport/userprovisioning/v2/statichostuser.pb.go | 2 +- .../teleport/userprovisioning/v2/statichostuser_service.pb.go | 2 +- api/gen/proto/go/teleport/users/v1/users_service.pb.go | 2 
+- api/gen/proto/go/teleport/usertasks/v1/user_tasks.pb.go | 2 +- .../proto/go/teleport/usertasks/v1/user_tasks_service.pb.go | 2 +- api/gen/proto/go/teleport/vnet/v1/vnet_config.pb.go | 2 +- api/gen/proto/go/teleport/vnet/v1/vnet_config_service.pb.go | 2 +- api/gen/proto/go/teleport/workloadidentity/v1/attrs.pb.go | 2 +- .../go/teleport/workloadidentity/v1/issuance_service.pb.go | 2 +- .../proto/go/teleport/workloadidentity/v1/join_attrs.pb.go | 2 +- api/gen/proto/go/teleport/workloadidentity/v1/resource.pb.go | 2 +- .../go/teleport/workloadidentity/v1/resource_service.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/access_graph.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/assist.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/cluster_preferences.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/onboard.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/sidenav_preferences.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/theme.pb.go | 2 +- .../go/userpreferences/v1/unified_resource_preferences.pb.go | 2 +- api/gen/proto/go/userpreferences/v1/userpreferences.pb.go | 2 +- api/go.mod | 2 +- api/go.sum | 4 ++-- gen/proto/go/accessgraph/v1alpha/access_graph_service.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/aws.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/azure.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/entra.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/events.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/gitlab.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/graph.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/netiq.pb.go | 2 +- gen/proto/go/accessgraph/v1alpha/resources.pb.go | 2 +- gen/proto/go/prehog/v1/teleport.pb.go | 2 +- gen/proto/go/prehog/v1alpha/connect.pb.go | 2 +- gen/proto/go/prehog/v1alpha/tbot.pb.go | 2 +- gen/proto/go/prehog/v1alpha/teleport.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/access_request.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/app.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/auth_settings.pb.go | 2 +- 
gen/proto/go/teleport/lib/teleterm/v1/cluster.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/database.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/kube.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/label.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/server.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/service.pb.go | 2 +- .../go/teleport/lib/teleterm/v1/tshd_events_service.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/v1/usage_events.pb.go | 2 +- gen/proto/go/teleport/lib/teleterm/vnet/v1/vnet_service.pb.go | 2 +- gen/proto/go/teleport/quicpeering/v1alpha/dial.pb.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- integrations/event-handler/go.mod | 2 +- integrations/event-handler/go.sum | 4 ++-- integrations/terraform/go.mod | 2 +- integrations/terraform/go.sum | 4 ++-- lib/multiplexer/test/ping.pb.go | 2 +- 146 files changed, 150 insertions(+), 150 deletions(-) diff --git a/api/client/proto/event.pb.go b/api/client/proto/event.pb.go index 0cd4d8e9a4eb5..c88802d8e5f51 100644 --- a/api/client/proto/event.pb.go +++ b/api/client/proto/event.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/legacy/client/proto/event.proto diff --git a/api/gen/proto/go/teleport/accessgraph/v1/authorized_key.pb.go b/api/gen/proto/go/teleport/accessgraph/v1/authorized_key.pb.go index 58afbe766d1bb..d613a6d5eff54 100644 --- a/api/gen/proto/go/teleport/accessgraph/v1/authorized_key.pb.go +++ b/api/gen/proto/go/teleport/accessgraph/v1/authorized_key.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/access_graph/v1/authorized_key.proto diff --git a/api/gen/proto/go/teleport/accessgraph/v1/private_key.pb.go b/api/gen/proto/go/teleport/accessgraph/v1/private_key.pb.go index 4b93f506a0a29..71db5e0c9d0be 100644 --- a/api/gen/proto/go/teleport/accessgraph/v1/private_key.pb.go +++ b/api/gen/proto/go/teleport/accessgraph/v1/private_key.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/access_graph/v1/private_key.proto diff --git a/api/gen/proto/go/teleport/accessgraph/v1/secrets_service.pb.go b/api/gen/proto/go/teleport/accessgraph/v1/secrets_service.pb.go index 0159b47a0950d..b9e8803d3be72 100644 --- a/api/gen/proto/go/teleport/accessgraph/v1/secrets_service.pb.go +++ b/api/gen/proto/go/teleport/accessgraph/v1/secrets_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/access_graph/v1/secrets_service.proto diff --git a/api/gen/proto/go/teleport/accesslist/v1/accesslist.pb.go b/api/gen/proto/go/teleport/accesslist/v1/accesslist.pb.go index 1eefd6d3e6c28..eef7ebbe00327 100644 --- a/api/gen/proto/go/teleport/accesslist/v1/accesslist.pb.go +++ b/api/gen/proto/go/teleport/accesslist/v1/accesslist.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/accesslist/v1/accesslist.proto diff --git a/api/gen/proto/go/teleport/accesslist/v1/accesslist_service.pb.go b/api/gen/proto/go/teleport/accesslist/v1/accesslist_service.pb.go index d79fa7dd8b5ce..64db5a9bb1554 100644 --- a/api/gen/proto/go/teleport/accesslist/v1/accesslist_service.pb.go +++ b/api/gen/proto/go/teleport/accesslist/v1/accesslist_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/accesslist/v1/accesslist_service.proto diff --git a/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules.pb.go b/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules.pb.go index e5f23f5ae32ab..5a8daa9e5d0dd 100644 --- a/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules.pb.go +++ b/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/accessmonitoringrules/v1/access_monitoring_rules.proto diff --git a/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules_service.pb.go b/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules_service.pb.go index 624bdd8c8c339..6f795523dd141 100644 --- a/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules_service.pb.go +++ b/api/gen/proto/go/teleport/accessmonitoringrules/v1/access_monitoring_rules_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/accessmonitoringrules/v1/access_monitoring_rules_service.proto diff --git a/api/gen/proto/go/teleport/auditlog/v1/auditlog.pb.go b/api/gen/proto/go/teleport/auditlog/v1/auditlog.pb.go index 6915adf45f0b7..4ab166ac2b850 100644 --- a/api/gen/proto/go/teleport/auditlog/v1/auditlog.pb.go +++ b/api/gen/proto/go/teleport/auditlog/v1/auditlog.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/auditlog/v1/auditlog.proto diff --git a/api/gen/proto/go/teleport/autoupdate/v1/autoupdate.pb.go b/api/gen/proto/go/teleport/autoupdate/v1/autoupdate.pb.go index ebf89e4f8e971..524dfb87bccd0 100644 --- a/api/gen/proto/go/teleport/autoupdate/v1/autoupdate.pb.go +++ b/api/gen/proto/go/teleport/autoupdate/v1/autoupdate.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/autoupdate/v1/autoupdate.proto diff --git a/api/gen/proto/go/teleport/autoupdate/v1/autoupdate_service.pb.go b/api/gen/proto/go/teleport/autoupdate/v1/autoupdate_service.pb.go index 46fd39e2c983d..513b8e42323b5 100644 --- a/api/gen/proto/go/teleport/autoupdate/v1/autoupdate_service.pb.go +++ b/api/gen/proto/go/teleport/autoupdate/v1/autoupdate_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/autoupdate/v1/autoupdate_service.proto diff --git a/api/gen/proto/go/teleport/clusterconfig/v1/access_graph.pb.go b/api/gen/proto/go/teleport/clusterconfig/v1/access_graph.pb.go index 0bbe9d081d7e1..64223a9c8b836 100644 --- a/api/gen/proto/go/teleport/clusterconfig/v1/access_graph.pb.go +++ b/api/gen/proto/go/teleport/clusterconfig/v1/access_graph.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/clusterconfig/v1/access_graph.proto diff --git a/api/gen/proto/go/teleport/clusterconfig/v1/access_graph_settings.pb.go b/api/gen/proto/go/teleport/clusterconfig/v1/access_graph_settings.pb.go index 78ae3205b43dd..cff7dd560d683 100644 --- a/api/gen/proto/go/teleport/clusterconfig/v1/access_graph_settings.pb.go +++ b/api/gen/proto/go/teleport/clusterconfig/v1/access_graph_settings.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/clusterconfig/v1/access_graph_settings.proto diff --git a/api/gen/proto/go/teleport/clusterconfig/v1/clusterconfig_service.pb.go b/api/gen/proto/go/teleport/clusterconfig/v1/clusterconfig_service.pb.go index 41e324787bdeb..52e603d231de2 100644 --- a/api/gen/proto/go/teleport/clusterconfig/v1/clusterconfig_service.pb.go +++ b/api/gen/proto/go/teleport/clusterconfig/v1/clusterconfig_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/clusterconfig/v1/clusterconfig_service.proto diff --git a/api/gen/proto/go/teleport/crownjewel/v1/crownjewel.pb.go b/api/gen/proto/go/teleport/crownjewel/v1/crownjewel.pb.go index 701b289b159ac..e52a998d19d77 100644 --- a/api/gen/proto/go/teleport/crownjewel/v1/crownjewel.pb.go +++ b/api/gen/proto/go/teleport/crownjewel/v1/crownjewel.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/crownjewel/v1/crownjewel.proto diff --git a/api/gen/proto/go/teleport/crownjewel/v1/crownjewel_service.pb.go b/api/gen/proto/go/teleport/crownjewel/v1/crownjewel_service.pb.go index 8db11e390b127..c806430ef8642 100644 --- a/api/gen/proto/go/teleport/crownjewel/v1/crownjewel_service.pb.go +++ b/api/gen/proto/go/teleport/crownjewel/v1/crownjewel_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/crownjewel/v1/crownjewel_service.proto diff --git a/api/gen/proto/go/teleport/dbobject/v1/dbobject.pb.go b/api/gen/proto/go/teleport/dbobject/v1/dbobject.pb.go index 3c4ac98b7619e..5a897b7d2c340 100644 --- a/api/gen/proto/go/teleport/dbobject/v1/dbobject.pb.go +++ b/api/gen/proto/go/teleport/dbobject/v1/dbobject.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/dbobject/v1/dbobject.proto diff --git a/api/gen/proto/go/teleport/dbobject/v1/dbobject_service.pb.go b/api/gen/proto/go/teleport/dbobject/v1/dbobject_service.pb.go index a0ce791c81ad2..8fb77f7a26e41 100644 --- a/api/gen/proto/go/teleport/dbobject/v1/dbobject_service.pb.go +++ b/api/gen/proto/go/teleport/dbobject/v1/dbobject_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/dbobject/v1/dbobject_service.proto diff --git a/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule.pb.go b/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule.pb.go index 0549857f14423..e941d6c4a23d5 100644 --- a/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule.pb.go +++ b/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/dbobjectimportrule/v1/dbobjectimportrule.proto diff --git a/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule_service.pb.go b/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule_service.pb.go index 480b571dec331..f957e01c481ee 100644 --- a/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule_service.pb.go +++ b/api/gen/proto/go/teleport/dbobjectimportrule/v1/dbobjectimportrule_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/dbobjectimportrule/v1/dbobjectimportrule_service.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/database_access.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/database_access.pb.go index f60a120e794e5..f4966dc82ca8a 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/database_access.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/database_access.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/database_access.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/decision_service.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/decision_service.pb.go index d50f86f8a1a7f..3e55756c9e6b6 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/decision_service.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/decision_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/decision_service.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/denial_metadata.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/denial_metadata.pb.go index d915867e0186d..dca641e4c51fe 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/denial_metadata.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/denial_metadata.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/denial_metadata.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/enforcement_feature.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/enforcement_feature.pb.go index 4f62700671f18..44a3c45015de5 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/enforcement_feature.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/enforcement_feature.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/enforcement_feature.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/permit_metadata.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/permit_metadata.pb.go index 619193d751c90..42ebd4b919bf9 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/permit_metadata.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/permit_metadata.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/permit_metadata.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/request_metadata.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/request_metadata.pb.go index 4ad047ca54f6f..f65a345ef7cfc 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/request_metadata.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/request_metadata.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/request_metadata.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/resource.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/resource.pb.go index a7d7f19e53d09..1186273ed79c0 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/resource.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/resource.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/resource.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/ssh_access.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/ssh_access.pb.go index 2c88519fe0668..58151a58576b5 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/ssh_access.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/ssh_access.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/ssh_access.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/ssh_identity.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/ssh_identity.pb.go index 34833d21cb83b..cc6efd2e95609 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/ssh_identity.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/ssh_identity.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/ssh_identity.proto diff --git a/api/gen/proto/go/teleport/decision/v1alpha1/tls_identity.pb.go b/api/gen/proto/go/teleport/decision/v1alpha1/tls_identity.pb.go index 08547f70b19e3..46db8cead9172 100644 --- a/api/gen/proto/go/teleport/decision/v1alpha1/tls_identity.pb.go +++ b/api/gen/proto/go/teleport/decision/v1alpha1/tls_identity.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/decision/v1alpha1/tls_identity.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/assert.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/assert.pb.go index 5a3baf76bed4d..0f18aba139bae 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/assert.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/assert.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/assert.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/authenticate_challenge.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/authenticate_challenge.pb.go index b77c2bb6e60f7..9cc6c052bd5fd 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/authenticate_challenge.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/authenticate_challenge.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/authenticate_challenge.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device.pb.go index 28982e37a9833..88473cac3fed6 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device_collected_data.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device_collected_data.pb.go index 9a4c6852c7c98..1dde056b835f2 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device_collected_data.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device_collected_data.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device_collected_data.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device_confirmation_token.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device_confirmation_token.pb.go index 437859da2de81..06f26bfb229f9 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device_confirmation_token.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device_confirmation_token.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device_confirmation_token.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device_enroll_token.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device_enroll_token.pb.go index 870d58b0c8191..156fe424a92ad 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device_enroll_token.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device_enroll_token.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device_enroll_token.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device_profile.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device_profile.pb.go index 25d07d180601d..1b8901e5b5209 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device_profile.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device_profile.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device_profile.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device_source.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device_source.pb.go index 05e94ea93924e..4656525d48ad0 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device_source.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device_source.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device_source.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/device_web_token.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/device_web_token.pb.go index 6ed47b8c22ef1..f659a2241785c 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/device_web_token.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/device_web_token.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/device_web_token.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/devicetrust_service.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/devicetrust_service.pb.go index fa9bcb045acd0..d0ce571c7647d 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/devicetrust_service.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/devicetrust_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/devicetrust_service.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/os_type.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/os_type.pb.go index 86f87c2696f2c..7b7e523c51788 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/os_type.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/os_type.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/os_type.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/tpm.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/tpm.pb.go index 8f096a0b12fab..05e68f0be42e5 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/tpm.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/tpm.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/tpm.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/usage.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/usage.pb.go index 7017dc863c8e7..04d16706c2949 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/usage.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/usage.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/usage.proto diff --git a/api/gen/proto/go/teleport/devicetrust/v1/user_certificates.pb.go b/api/gen/proto/go/teleport/devicetrust/v1/user_certificates.pb.go index 2e1777356b79e..c7b709f69740a 100644 --- a/api/gen/proto/go/teleport/devicetrust/v1/user_certificates.pb.go +++ b/api/gen/proto/go/teleport/devicetrust/v1/user_certificates.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/devicetrust/v1/user_certificates.proto diff --git a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig.pb.go b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig.pb.go index 45246e9784a65..ee9fabd36c5b1 100644 --- a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig.pb.go +++ b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/discoveryconfig/v1/discoveryconfig.proto diff --git a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go index ed9ba6ca3192f..865271a0ea0ed 100644 --- a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go +++ b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/discoveryconfig/v1/discoveryconfig_service.proto diff --git a/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go b/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go index 2775a75226d38..8a0cd96cce0db 100644 --- a/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go +++ b/api/gen/proto/go/teleport/dynamicwindows/v1/dynamicwindows_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/dynamicwindows/v1/dynamicwindows_service.proto diff --git a/api/gen/proto/go/teleport/embedding/v1/embedding.pb.go b/api/gen/proto/go/teleport/embedding/v1/embedding.pb.go index e8e6468e43d50..f323b7f950c9d 100644 --- a/api/gen/proto/go/teleport/embedding/v1/embedding.pb.go +++ b/api/gen/proto/go/teleport/embedding/v1/embedding.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/embedding/v1/embedding.proto diff --git a/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage.pb.go b/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage.pb.go index 8c5496ac23f7e..a3ef4dbcd9638 100644 --- a/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage.pb.go +++ b/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/externalauditstorage/v1/externalauditstorage.proto diff --git a/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage_service.pb.go b/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage_service.pb.go index a92b3db942e2e..8516f46943333 100644 --- a/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage_service.pb.go +++ b/api/gen/proto/go/teleport/externalauditstorage/v1/externalauditstorage_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/externalauditstorage/v1/externalauditstorage_service.proto diff --git a/api/gen/proto/go/teleport/gitserver/v1/git_server_service.pb.go b/api/gen/proto/go/teleport/gitserver/v1/git_server_service.pb.go index f27cca6f71657..185e85c34e7d5 100644 --- a/api/gen/proto/go/teleport/gitserver/v1/git_server_service.pb.go +++ b/api/gen/proto/go/teleport/gitserver/v1/git_server_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/gitserver/v1/git_server_service.proto diff --git a/api/gen/proto/go/teleport/header/v1/metadata.pb.go b/api/gen/proto/go/teleport/header/v1/metadata.pb.go index 08cb5db8f338e..f9765ca55bc3e 100644 --- a/api/gen/proto/go/teleport/header/v1/metadata.pb.go +++ b/api/gen/proto/go/teleport/header/v1/metadata.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/header/v1/metadata.proto diff --git a/api/gen/proto/go/teleport/header/v1/resourceheader.pb.go b/api/gen/proto/go/teleport/header/v1/resourceheader.pb.go index 432d595d89825..43a144afa9678 100644 --- a/api/gen/proto/go/teleport/header/v1/resourceheader.pb.go +++ b/api/gen/proto/go/teleport/header/v1/resourceheader.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/header/v1/resourceheader.proto diff --git a/api/gen/proto/go/teleport/identitycenter/v1/identitycenter.pb.go b/api/gen/proto/go/teleport/identitycenter/v1/identitycenter.pb.go index f2c2277101eae..22025bbd423cc 100644 --- a/api/gen/proto/go/teleport/identitycenter/v1/identitycenter.pb.go +++ b/api/gen/proto/go/teleport/identitycenter/v1/identitycenter.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/identitycenter/v1/identitycenter.proto diff --git a/api/gen/proto/go/teleport/identitycenter/v1/identitycenter_service.pb.go b/api/gen/proto/go/teleport/identitycenter/v1/identitycenter_service.pb.go index d35581691f971..be8d88f993e1a 100644 --- a/api/gen/proto/go/teleport/identitycenter/v1/identitycenter_service.pb.go +++ b/api/gen/proto/go/teleport/identitycenter/v1/identitycenter_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/identitycenter/v1/identitycenter_service.proto diff --git a/api/gen/proto/go/teleport/integration/v1/awsoidc_service.pb.go b/api/gen/proto/go/teleport/integration/v1/awsoidc_service.pb.go index 31b04b0e44c15..7bdd2a53243dd 100644 --- a/api/gen/proto/go/teleport/integration/v1/awsoidc_service.pb.go +++ b/api/gen/proto/go/teleport/integration/v1/awsoidc_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/integration/v1/awsoidc_service.proto diff --git a/api/gen/proto/go/teleport/integration/v1/integration_service.pb.go b/api/gen/proto/go/teleport/integration/v1/integration_service.pb.go index e2585cd02c4c3..1dc5813abc7cb 100644 --- a/api/gen/proto/go/teleport/integration/v1/integration_service.pb.go +++ b/api/gen/proto/go/teleport/integration/v1/integration_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/integration/v1/integration_service.proto diff --git a/api/gen/proto/go/teleport/kube/v1/kube_service.pb.go b/api/gen/proto/go/teleport/kube/v1/kube_service.pb.go index 883afa302bc90..c5c66271694b1 100644 --- a/api/gen/proto/go/teleport/kube/v1/kube_service.pb.go +++ b/api/gen/proto/go/teleport/kube/v1/kube_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/kube/v1/kube_service.proto diff --git a/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer.pb.go b/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer.pb.go index a2f027e4d512f..a01f49eb6120e 100644 --- a/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer.pb.go +++ b/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/kubewaitingcontainer/v1/kubewaitingcontainer.proto diff --git a/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer_service.pb.go b/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer_service.pb.go index 10a50822351e5..035a86570dbfb 100644 --- a/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer_service.pb.go +++ b/api/gen/proto/go/teleport/kubewaitingcontainer/v1/kubewaitingcontainer_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/kubewaitingcontainer/v1/kubewaitingcontainer_service.proto diff --git a/api/gen/proto/go/teleport/label/v1/label.pb.go b/api/gen/proto/go/teleport/label/v1/label.pb.go index 25b3eb2f0f2e0..9ed8eed1684b2 100644 --- a/api/gen/proto/go/teleport/label/v1/label.pb.go +++ b/api/gen/proto/go/teleport/label/v1/label.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/label/v1/label.proto diff --git a/api/gen/proto/go/teleport/loginrule/v1/loginrule.pb.go b/api/gen/proto/go/teleport/loginrule/v1/loginrule.pb.go index 43c89fb3572c9..a0f2b35ce0f67 100644 --- a/api/gen/proto/go/teleport/loginrule/v1/loginrule.pb.go +++ b/api/gen/proto/go/teleport/loginrule/v1/loginrule.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/loginrule/v1/loginrule.proto diff --git a/api/gen/proto/go/teleport/loginrule/v1/loginrule_service.pb.go b/api/gen/proto/go/teleport/loginrule/v1/loginrule_service.pb.go index cb1b8d15ab424..b0eb24bbbc693 100644 --- a/api/gen/proto/go/teleport/loginrule/v1/loginrule_service.pb.go +++ b/api/gen/proto/go/teleport/loginrule/v1/loginrule_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/loginrule/v1/loginrule_service.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/bot.pb.go b/api/gen/proto/go/teleport/machineid/v1/bot.pb.go index 49ae2c9dacf97..9ad66ade8a58e 100644 --- a/api/gen/proto/go/teleport/machineid/v1/bot.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/bot.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/bot.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go b/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go index ec0d5c2dd24d3..b6bc9a0d0320e 100644 --- a/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/bot_instance.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/bot_instance.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/bot_instance_service.pb.go b/api/gen/proto/go/teleport/machineid/v1/bot_instance_service.pb.go index c7035de1776ae..c398d2b93b091 100644 --- a/api/gen/proto/go/teleport/machineid/v1/bot_instance_service.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/bot_instance_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/bot_instance_service.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/bot_service.pb.go b/api/gen/proto/go/teleport/machineid/v1/bot_service.pb.go index 1f475a16c9ea9..71b9b66a152cc 100644 --- a/api/gen/proto/go/teleport/machineid/v1/bot_service.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/bot_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/bot_service.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/federation.pb.go b/api/gen/proto/go/teleport/machineid/v1/federation.pb.go index 7dfe88cd93e28..12e5429f648fb 100644 --- a/api/gen/proto/go/teleport/machineid/v1/federation.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/federation.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/federation.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/federation_service.pb.go b/api/gen/proto/go/teleport/machineid/v1/federation_service.pb.go index bfa2ee7f2546a..5c26dd6f9bb1d 100644 --- a/api/gen/proto/go/teleport/machineid/v1/federation_service.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/federation_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/federation_service.proto diff --git a/api/gen/proto/go/teleport/machineid/v1/workload_identity_service.pb.go b/api/gen/proto/go/teleport/machineid/v1/workload_identity_service.pb.go index 21a488a6c2c98..d4a225e601a9d 100644 --- a/api/gen/proto/go/teleport/machineid/v1/workload_identity_service.pb.go +++ b/api/gen/proto/go/teleport/machineid/v1/workload_identity_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/machineid/v1/workload_identity_service.proto diff --git a/api/gen/proto/go/teleport/notifications/v1/notifications.pb.go b/api/gen/proto/go/teleport/notifications/v1/notifications.pb.go index 996bd7492ecb9..56a73beb0b894 100644 --- a/api/gen/proto/go/teleport/notifications/v1/notifications.pb.go +++ b/api/gen/proto/go/teleport/notifications/v1/notifications.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/notifications/v1/notifications.proto diff --git a/api/gen/proto/go/teleport/notifications/v1/notifications_service.pb.go b/api/gen/proto/go/teleport/notifications/v1/notifications_service.pb.go index 3a5e485bdf899..31b557bd60d8f 100644 --- a/api/gen/proto/go/teleport/notifications/v1/notifications_service.pb.go +++ b/api/gen/proto/go/teleport/notifications/v1/notifications_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/notifications/v1/notifications_service.proto diff --git a/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go b/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go index a093e7aa22673..019c29c099fbd 100644 --- a/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go +++ b/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/okta/v1/okta_service.proto diff --git a/api/gen/proto/go/teleport/plugins/v1/plugin_service.pb.go b/api/gen/proto/go/teleport/plugins/v1/plugin_service.pb.go index 17992a7f0ec61..9548861c0e2fc 100644 --- a/api/gen/proto/go/teleport/plugins/v1/plugin_service.pb.go +++ b/api/gen/proto/go/teleport/plugins/v1/plugin_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/plugins/v1/plugin_service.proto diff --git a/api/gen/proto/go/teleport/presence/v1/service.pb.go b/api/gen/proto/go/teleport/presence/v1/service.pb.go index 7e730569d135f..630746a55ae1f 100644 --- a/api/gen/proto/go/teleport/presence/v1/service.pb.go +++ b/api/gen/proto/go/teleport/presence/v1/service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/presence/v1/service.proto diff --git a/api/gen/proto/go/teleport/provisioning/v1/provisioning.pb.go b/api/gen/proto/go/teleport/provisioning/v1/provisioning.pb.go index 89f958940e704..36ef554f68894 100644 --- a/api/gen/proto/go/teleport/provisioning/v1/provisioning.pb.go +++ b/api/gen/proto/go/teleport/provisioning/v1/provisioning.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/provisioning/v1/provisioning.proto diff --git a/api/gen/proto/go/teleport/provisioning/v1/provisioning_service.pb.go b/api/gen/proto/go/teleport/provisioning/v1/provisioning_service.pb.go index 9afe3669e6b5c..c4ecb771ed3f9 100644 --- a/api/gen/proto/go/teleport/provisioning/v1/provisioning_service.pb.go +++ b/api/gen/proto/go/teleport/provisioning/v1/provisioning_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/provisioning/v1/provisioning_service.proto diff --git a/api/gen/proto/go/teleport/resourceusage/v1/access_requests.pb.go b/api/gen/proto/go/teleport/resourceusage/v1/access_requests.pb.go index b0f800e4cc531..572eaf6be5379 100644 --- a/api/gen/proto/go/teleport/resourceusage/v1/access_requests.pb.go +++ b/api/gen/proto/go/teleport/resourceusage/v1/access_requests.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/resourceusage/v1/access_requests.proto diff --git a/api/gen/proto/go/teleport/resourceusage/v1/account_usage_type.pb.go b/api/gen/proto/go/teleport/resourceusage/v1/account_usage_type.pb.go index 774a416ac63be..7371056acaea1 100644 --- a/api/gen/proto/go/teleport/resourceusage/v1/account_usage_type.pb.go +++ b/api/gen/proto/go/teleport/resourceusage/v1/account_usage_type.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/resourceusage/v1/account_usage_type.proto diff --git a/api/gen/proto/go/teleport/resourceusage/v1/device_trust.pb.go b/api/gen/proto/go/teleport/resourceusage/v1/device_trust.pb.go index 2f1edefaa1d97..f22dbc7192790 100644 --- a/api/gen/proto/go/teleport/resourceusage/v1/device_trust.pb.go +++ b/api/gen/proto/go/teleport/resourceusage/v1/device_trust.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/resourceusage/v1/device_trust.proto diff --git a/api/gen/proto/go/teleport/resourceusage/v1/resourceusage_service.pb.go b/api/gen/proto/go/teleport/resourceusage/v1/resourceusage_service.pb.go index 56434b3e61391..2ee2f531557a6 100644 --- a/api/gen/proto/go/teleport/resourceusage/v1/resourceusage_service.pb.go +++ b/api/gen/proto/go/teleport/resourceusage/v1/resourceusage_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/resourceusage/v1/resourceusage_service.proto diff --git a/api/gen/proto/go/teleport/samlidp/v1/samlidp.pb.go b/api/gen/proto/go/teleport/samlidp/v1/samlidp.pb.go index 3907eb1233c15..726931a041f63 100644 --- a/api/gen/proto/go/teleport/samlidp/v1/samlidp.pb.go +++ b/api/gen/proto/go/teleport/samlidp/v1/samlidp.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/samlidp/v1/samlidp.proto diff --git a/api/gen/proto/go/teleport/scim/v1/scim_service.pb.go b/api/gen/proto/go/teleport/scim/v1/scim_service.pb.go index 6011ca60f276f..df311cb21183e 100644 --- a/api/gen/proto/go/teleport/scim/v1/scim_service.pb.go +++ b/api/gen/proto/go/teleport/scim/v1/scim_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/scim/v1/scim_service.proto diff --git a/api/gen/proto/go/teleport/secreports/v1/secreports.pb.go b/api/gen/proto/go/teleport/secreports/v1/secreports.pb.go index 09ac982395bf1..5f78d6170d151 100644 --- a/api/gen/proto/go/teleport/secreports/v1/secreports.pb.go +++ b/api/gen/proto/go/teleport/secreports/v1/secreports.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/secreports/v1/secreports.proto diff --git a/api/gen/proto/go/teleport/secreports/v1/secreports_service.pb.go b/api/gen/proto/go/teleport/secreports/v1/secreports_service.pb.go index 8a73244cc5a72..0dba69269566e 100644 --- a/api/gen/proto/go/teleport/secreports/v1/secreports_service.pb.go +++ b/api/gen/proto/go/teleport/secreports/v1/secreports_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/secreports/v1/secreports_service.proto diff --git a/api/gen/proto/go/teleport/trait/v1/trait.pb.go b/api/gen/proto/go/teleport/trait/v1/trait.pb.go index 019987e24e0fb..eded56dc78b4c 100644 --- a/api/gen/proto/go/teleport/trait/v1/trait.pb.go +++ b/api/gen/proto/go/teleport/trait/v1/trait.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/trait/v1/trait.proto diff --git a/api/gen/proto/go/teleport/transport/v1/transport_service.pb.go b/api/gen/proto/go/teleport/transport/v1/transport_service.pb.go index 38d8b33eb7511..ea95b70499d76 100644 --- a/api/gen/proto/go/teleport/transport/v1/transport_service.pb.go +++ b/api/gen/proto/go/teleport/transport/v1/transport_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. 
DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/transport/v1/transport_service.proto diff --git a/api/gen/proto/go/teleport/trust/v1/trust_service.pb.go b/api/gen/proto/go/teleport/trust/v1/trust_service.pb.go index ca26508234431..368f3af6c410b 100644 --- a/api/gen/proto/go/teleport/trust/v1/trust_service.pb.go +++ b/api/gen/proto/go/teleport/trust/v1/trust_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/trust/v1/trust_service.proto diff --git a/api/gen/proto/go/teleport/userloginstate/v1/userloginstate.pb.go b/api/gen/proto/go/teleport/userloginstate/v1/userloginstate.pb.go index f187235904554..0a9d599a95cb6 100644 --- a/api/gen/proto/go/teleport/userloginstate/v1/userloginstate.pb.go +++ b/api/gen/proto/go/teleport/userloginstate/v1/userloginstate.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userloginstate/v1/userloginstate.proto diff --git a/api/gen/proto/go/teleport/userloginstate/v1/userloginstate_service.pb.go b/api/gen/proto/go/teleport/userloginstate/v1/userloginstate_service.pb.go index a6b8739ee3c3b..0e256fa3ac17f 100644 --- a/api/gen/proto/go/teleport/userloginstate/v1/userloginstate_service.pb.go +++ b/api/gen/proto/go/teleport/userloginstate/v1/userloginstate_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userloginstate/v1/userloginstate_service.proto diff --git a/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser.pb.go b/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser.pb.go index d35b9ecbe57f4..c2c00558af965 100644 --- a/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser.pb.go +++ b/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userprovisioning/v2/statichostuser.proto diff --git a/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser_service.pb.go b/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser_service.pb.go index aa0b0003d9055..9672571141aaa 100644 --- a/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser_service.pb.go +++ b/api/gen/proto/go/teleport/userprovisioning/v2/statichostuser_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userprovisioning/v2/statichostuser_service.proto diff --git a/api/gen/proto/go/teleport/users/v1/users_service.pb.go b/api/gen/proto/go/teleport/users/v1/users_service.pb.go index 8ba0773b86f3e..31d9eed83d150 100644 --- a/api/gen/proto/go/teleport/users/v1/users_service.pb.go +++ b/api/gen/proto/go/teleport/users/v1/users_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/users/v1/users_service.proto diff --git a/api/gen/proto/go/teleport/usertasks/v1/user_tasks.pb.go b/api/gen/proto/go/teleport/usertasks/v1/user_tasks.pb.go index 65e4e25b7e9c6..a2f0699e80399 100644 --- a/api/gen/proto/go/teleport/usertasks/v1/user_tasks.pb.go +++ b/api/gen/proto/go/teleport/usertasks/v1/user_tasks.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/usertasks/v1/user_tasks.proto diff --git a/api/gen/proto/go/teleport/usertasks/v1/user_tasks_service.pb.go b/api/gen/proto/go/teleport/usertasks/v1/user_tasks_service.pb.go index 4356cbbb9032e..7aebfdacce2c1 100644 --- a/api/gen/proto/go/teleport/usertasks/v1/user_tasks_service.pb.go +++ b/api/gen/proto/go/teleport/usertasks/v1/user_tasks_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/usertasks/v1/user_tasks_service.proto diff --git a/api/gen/proto/go/teleport/vnet/v1/vnet_config.pb.go b/api/gen/proto/go/teleport/vnet/v1/vnet_config.pb.go index 46ed1e982180c..5f1bf127ab286 100644 --- a/api/gen/proto/go/teleport/vnet/v1/vnet_config.pb.go +++ b/api/gen/proto/go/teleport/vnet/v1/vnet_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/vnet/v1/vnet_config.proto diff --git a/api/gen/proto/go/teleport/vnet/v1/vnet_config_service.pb.go b/api/gen/proto/go/teleport/vnet/v1/vnet_config_service.pb.go index f55eff558f365..4e7e5dcf431ad 100644 --- a/api/gen/proto/go/teleport/vnet/v1/vnet_config_service.pb.go +++ b/api/gen/proto/go/teleport/vnet/v1/vnet_config_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/vnet/v1/vnet_config_service.proto diff --git a/api/gen/proto/go/teleport/workloadidentity/v1/attrs.pb.go b/api/gen/proto/go/teleport/workloadidentity/v1/attrs.pb.go index b2e7023eabb94..60bbb16286951 100644 --- a/api/gen/proto/go/teleport/workloadidentity/v1/attrs.pb.go +++ b/api/gen/proto/go/teleport/workloadidentity/v1/attrs.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/workloadidentity/v1/attrs.proto diff --git a/api/gen/proto/go/teleport/workloadidentity/v1/issuance_service.pb.go b/api/gen/proto/go/teleport/workloadidentity/v1/issuance_service.pb.go index 48f8d95b87375..bf4c3699429e4 100644 --- a/api/gen/proto/go/teleport/workloadidentity/v1/issuance_service.pb.go +++ b/api/gen/proto/go/teleport/workloadidentity/v1/issuance_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/workloadidentity/v1/issuance_service.proto diff --git a/api/gen/proto/go/teleport/workloadidentity/v1/join_attrs.pb.go b/api/gen/proto/go/teleport/workloadidentity/v1/join_attrs.pb.go index f0bc0cfb3e7f5..8321094cd08ad 100644 --- a/api/gen/proto/go/teleport/workloadidentity/v1/join_attrs.pb.go +++ b/api/gen/proto/go/teleport/workloadidentity/v1/join_attrs.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/workloadidentity/v1/join_attrs.proto diff --git a/api/gen/proto/go/teleport/workloadidentity/v1/resource.pb.go b/api/gen/proto/go/teleport/workloadidentity/v1/resource.pb.go index 75a923c48c3fa..1849d7e902173 100644 --- a/api/gen/proto/go/teleport/workloadidentity/v1/resource.pb.go +++ b/api/gen/proto/go/teleport/workloadidentity/v1/resource.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/workloadidentity/v1/resource.proto diff --git a/api/gen/proto/go/teleport/workloadidentity/v1/resource_service.pb.go b/api/gen/proto/go/teleport/workloadidentity/v1/resource_service.pb.go index 0a32a67a1f38f..09d38919281c9 100644 --- a/api/gen/proto/go/teleport/workloadidentity/v1/resource_service.pb.go +++ b/api/gen/proto/go/teleport/workloadidentity/v1/resource_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/workloadidentity/v1/resource_service.proto diff --git a/api/gen/proto/go/userpreferences/v1/access_graph.pb.go b/api/gen/proto/go/userpreferences/v1/access_graph.pb.go index da763ea0f2e94..f8a7d4d538b47 100644 --- a/api/gen/proto/go/userpreferences/v1/access_graph.pb.go +++ b/api/gen/proto/go/userpreferences/v1/access_graph.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/access_graph.proto diff --git a/api/gen/proto/go/userpreferences/v1/assist.pb.go b/api/gen/proto/go/userpreferences/v1/assist.pb.go index e757960c0ed10..44ddb1846d73a 100644 --- a/api/gen/proto/go/userpreferences/v1/assist.pb.go +++ b/api/gen/proto/go/userpreferences/v1/assist.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/assist.proto diff --git a/api/gen/proto/go/userpreferences/v1/cluster_preferences.pb.go b/api/gen/proto/go/userpreferences/v1/cluster_preferences.pb.go index c77a39093f94d..3e4a2c8dd6667 100644 --- a/api/gen/proto/go/userpreferences/v1/cluster_preferences.pb.go +++ b/api/gen/proto/go/userpreferences/v1/cluster_preferences.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/cluster_preferences.proto diff --git a/api/gen/proto/go/userpreferences/v1/onboard.pb.go b/api/gen/proto/go/userpreferences/v1/onboard.pb.go index 4088c0ce805de..746788d07c81e 100644 --- a/api/gen/proto/go/userpreferences/v1/onboard.pb.go +++ b/api/gen/proto/go/userpreferences/v1/onboard.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/onboard.proto diff --git a/api/gen/proto/go/userpreferences/v1/sidenav_preferences.pb.go b/api/gen/proto/go/userpreferences/v1/sidenav_preferences.pb.go index 4ee547cce5245..307a03fbbc813 100644 --- a/api/gen/proto/go/userpreferences/v1/sidenav_preferences.pb.go +++ b/api/gen/proto/go/userpreferences/v1/sidenav_preferences.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/sidenav_preferences.proto diff --git a/api/gen/proto/go/userpreferences/v1/theme.pb.go b/api/gen/proto/go/userpreferences/v1/theme.pb.go index 62e274bbe9d62..c1850f157cc23 100644 --- a/api/gen/proto/go/userpreferences/v1/theme.pb.go +++ b/api/gen/proto/go/userpreferences/v1/theme.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/theme.proto diff --git a/api/gen/proto/go/userpreferences/v1/unified_resource_preferences.pb.go b/api/gen/proto/go/userpreferences/v1/unified_resource_preferences.pb.go index 0806e1631a803..8edaf4ab72e77 100644 --- a/api/gen/proto/go/userpreferences/v1/unified_resource_preferences.pb.go +++ b/api/gen/proto/go/userpreferences/v1/unified_resource_preferences.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/unified_resource_preferences.proto diff --git a/api/gen/proto/go/userpreferences/v1/userpreferences.pb.go b/api/gen/proto/go/userpreferences/v1/userpreferences.pb.go index c4280fee817c7..5023be960cbbf 100644 --- a/api/gen/proto/go/userpreferences/v1/userpreferences.pb.go +++ b/api/gen/proto/go/userpreferences/v1/userpreferences.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/userpreferences/v1/userpreferences.proto diff --git a/api/go.mod b/api/go.mod index 92b806326f5f2..4880a1bb6746e 100644 --- a/api/go.mod +++ b/api/go.mod @@ -28,7 +28,7 @@ require ( golang.org/x/term v0.28.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d google.golang.org/grpc v1.69.2 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.2 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/api/go.sum b/api/go.sum index a38974728a36a..13439603f2f1f 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1569,8 +1569,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/gen/proto/go/accessgraph/v1alpha/access_graph_service.pb.go b/gen/proto/go/accessgraph/v1alpha/access_graph_service.pb.go index f2e88f23d89c8..e8d3509d8a0c5 100644 --- a/gen/proto/go/accessgraph/v1alpha/access_graph_service.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/access_graph_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/access_graph_service.proto diff --git a/gen/proto/go/accessgraph/v1alpha/aws.pb.go b/gen/proto/go/accessgraph/v1alpha/aws.pb.go index 3a4f1065253e3..93140a69cb157 100644 --- a/gen/proto/go/accessgraph/v1alpha/aws.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/aws.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/aws.proto diff --git a/gen/proto/go/accessgraph/v1alpha/azure.pb.go b/gen/proto/go/accessgraph/v1alpha/azure.pb.go index b7ea6443a8964..011350400cb5a 100644 --- a/gen/proto/go/accessgraph/v1alpha/azure.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/azure.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/azure.proto diff --git a/gen/proto/go/accessgraph/v1alpha/entra.pb.go b/gen/proto/go/accessgraph/v1alpha/entra.pb.go index 5617e3154135c..d874a2ef9d873 100644 --- a/gen/proto/go/accessgraph/v1alpha/entra.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/entra.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/entra.proto diff --git a/gen/proto/go/accessgraph/v1alpha/events.pb.go b/gen/proto/go/accessgraph/v1alpha/events.pb.go index fd30d09ff450b..e6fbbc5fe596f 100644 --- a/gen/proto/go/accessgraph/v1alpha/events.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/events.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/events.proto diff --git a/gen/proto/go/accessgraph/v1alpha/gitlab.pb.go b/gen/proto/go/accessgraph/v1alpha/gitlab.pb.go index 23b3e914240dc..452480e75ff83 100644 --- a/gen/proto/go/accessgraph/v1alpha/gitlab.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/gitlab.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/gitlab.proto diff --git a/gen/proto/go/accessgraph/v1alpha/graph.pb.go b/gen/proto/go/accessgraph/v1alpha/graph.pb.go index 066df973254e9..8e0d9933a3a1b 100644 --- a/gen/proto/go/accessgraph/v1alpha/graph.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/graph.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/graph.proto diff --git a/gen/proto/go/accessgraph/v1alpha/netiq.pb.go b/gen/proto/go/accessgraph/v1alpha/netiq.pb.go index e39260c159875..87fad9b862d0c 100644 --- a/gen/proto/go/accessgraph/v1alpha/netiq.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/netiq.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/netiq.proto diff --git a/gen/proto/go/accessgraph/v1alpha/resources.pb.go b/gen/proto/go/accessgraph/v1alpha/resources.pb.go index fe7e9a261e694..74562f05e5099 100644 --- a/gen/proto/go/accessgraph/v1alpha/resources.pb.go +++ b/gen/proto/go/accessgraph/v1alpha/resources.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: accessgraph/v1alpha/resources.proto diff --git a/gen/proto/go/prehog/v1/teleport.pb.go b/gen/proto/go/prehog/v1/teleport.pb.go index 7dfca32709ee9..5370b8d25b4ae 100644 --- a/gen/proto/go/prehog/v1/teleport.pb.go +++ b/gen/proto/go/prehog/v1/teleport.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: prehog/v1/teleport.proto diff --git a/gen/proto/go/prehog/v1alpha/connect.pb.go b/gen/proto/go/prehog/v1alpha/connect.pb.go index a86322f17adbc..b8e7ce26a8c1d 100644 --- a/gen/proto/go/prehog/v1alpha/connect.pb.go +++ b/gen/proto/go/prehog/v1alpha/connect.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: prehog/v1alpha/connect.proto diff --git a/gen/proto/go/prehog/v1alpha/tbot.pb.go b/gen/proto/go/prehog/v1alpha/tbot.pb.go index 654c1f2645217..8dd4bc16c9823 100644 --- a/gen/proto/go/prehog/v1alpha/tbot.pb.go +++ b/gen/proto/go/prehog/v1alpha/tbot.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: prehog/v1alpha/tbot.proto diff --git a/gen/proto/go/prehog/v1alpha/teleport.pb.go b/gen/proto/go/prehog/v1alpha/teleport.pb.go index e67592e2b7984..df0bdf28abe52 100644 --- a/gen/proto/go/prehog/v1alpha/teleport.pb.go +++ b/gen/proto/go/prehog/v1alpha/teleport.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: prehog/v1alpha/teleport.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/access_request.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/access_request.pb.go index df21c55cc973e..1ad9384799160 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/access_request.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/access_request.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/access_request.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/app.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/app.pb.go index 222ee783e19c0..da3d6334d7726 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/app.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/app.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/app.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/auth_settings.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/auth_settings.pb.go index cadcb84907e9f..ade3418f4a932 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/auth_settings.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/auth_settings.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/auth_settings.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/cluster.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/cluster.pb.go index 51dbd7b580553..0c3ef15eaee63 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/cluster.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/cluster.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/cluster.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/database.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/database.pb.go index 8ce033e5c9c93..11df92928747b 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/database.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/database.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/database.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go index f2bc2e5b6c174..612afa9d557af 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/gateway.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/kube.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/kube.pb.go index 056780e54a086..e60d53f5ef84c 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/kube.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/kube.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/kube.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/label.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/label.pb.go index 115728588ead1..e3288a17f9965 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/label.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/label.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/label.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/server.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/server.pb.go index 2d26b10c2ebf4..1bb53ffdb3463 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/server.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/server.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/server.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/service.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/service.pb.go index 6a60ee36d074d..dc728548f7f37 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/service.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/service.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/tshd_events_service.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/tshd_events_service.pb.go index d4c27c8f47fcd..d2a0dca1760d0 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/tshd_events_service.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/tshd_events_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/tshd_events_service.proto diff --git a/gen/proto/go/teleport/lib/teleterm/v1/usage_events.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/usage_events.pb.go index 8b6bab199aaf1..a84e4397bbb17 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/usage_events.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/usage_events.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/v1/usage_events.proto diff --git a/gen/proto/go/teleport/lib/teleterm/vnet/v1/vnet_service.pb.go b/gen/proto/go/teleport/lib/teleterm/vnet/v1/vnet_service.pb.go index 285b72529c17b..ce192f1193f13 100644 --- a/gen/proto/go/teleport/lib/teleterm/vnet/v1/vnet_service.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/vnet/v1/vnet_service.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/teleterm/vnet/v1/vnet_service.proto diff --git a/gen/proto/go/teleport/quicpeering/v1alpha/dial.pb.go b/gen/proto/go/teleport/quicpeering/v1alpha/dial.pb.go index 1e6fed8a15dc1..25fa47d5de73e 100644 --- a/gen/proto/go/teleport/quicpeering/v1alpha/dial.pb.go +++ b/gen/proto/go/teleport/quicpeering/v1alpha/dial.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/quicpeering/v1alpha/dial.proto diff --git a/go.mod b/go.mod index 7e6e10cd7d882..e9f8023eb7323 100644 --- a/go.mod +++ b/go.mod @@ -215,7 +215,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d google.golang.org/grpc v1.69.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.2 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/dnaeon/go-vcr.v3 v3.2.0 gopkg.in/ini.v1 v1.67.0 diff --git a/go.sum b/go.sum index 8e7918257ef4e..c270f81730944 100644 --- a/go.sum +++ b/go.sum @@ -3116,8 +3116,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod index b7d0be9eb4684..873e43e03bce5 100644 --- a/integrations/event-handler/go.mod +++ b/integrations/event-handler/go.mod @@ -17,7 +17,7 @@ 
require ( github.com/stretchr/testify v1.10.0 golang.org/x/net v0.34.0 golang.org/x/time v0.9.0 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.2 ) require ( diff --git a/integrations/event-handler/go.sum b/integrations/event-handler/go.sum index e947423db8254..e88669e71da35 100644 --- a/integrations/event-handler/go.sum +++ b/integrations/event-handler/go.sum @@ -2343,8 +2343,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index 81610ba23b74f..1d9aa5363d8cc 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -24,7 +24,7 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.10.0 google.golang.org/grpc v1.69.2 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.2 ) require ( diff --git a/integrations/terraform/go.sum b/integrations/terraform/go.sum index f996d8217a817..1a6cf422dd62e 100644 --- a/integrations/terraform/go.sum +++ 
b/integrations/terraform/go.sum @@ -2704,8 +2704,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/lib/multiplexer/test/ping.pb.go b/lib/multiplexer/test/ping.pb.go index 94cbf8e67b1b9..0b90164fc3d1f 100644 --- a/lib/multiplexer/test/ping.pb.go +++ b/lib/multiplexer/test/ping.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.1 +// protoc-gen-go v1.36.2 // protoc (unknown) // source: teleport/lib/multiplexer/test/ping.proto From 47bf4cad41f106f40b7dee25883d345b0a85e340 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Skrz=C4=99tnicki?= Date: Wed, 8 Jan 2025 14:58:15 +0100 Subject: [PATCH 08/45] Bump e (#50858) --- e | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e b/e index 1bc4a6909732d..b486de24a443a 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit 1bc4a6909732d3b8b98b19fab56d9a39f228ec01 +Subproject commit b486de24a443a9f8eb3e349009af14d11814ff5c From cf01dc4cc4ef9b06618ab889985e6a1763bac4ea Mon Sep 17 00:00:00 2001 From: Vadym Popov Date: Wed, 8 Jan 2025 08:46:37 -0800 Subject: [PATCH 09/45] Add client tools auto update tctl commands (#47692) * Add client tools auto update tctl commands * Always print version for watch command Restrict update empty target version Rename command to upsert * Add alias on/off for tools mode Rename update command to configure * Add semantic version validation * Drop watch command for autoupdate * Replace Upsert with Update/Create Add format option for output json/yaml * Change update message * Use get/set naming for client-tools * Add mode to response * Change sub-command help messages Leave only aliases for enabled/disabled * Reorganize tctl commands to have commands not required auth client * Propagate insecure flag with global config to commands by context * Fix autoupdate command without auth client * Change commands to enable/disable/target * Add retry in case of the parallel request * Add more than one retry Code review changes * Update tool/tctl/common/autoupdate_command.go Co-authored-by: rosstimothy <39066650+rosstimothy@users.noreply.github.com> --------- Co-authored-by: rosstimothy <39066650+rosstimothy@users.noreply.github.com> --- tool/tctl/common/autoupdate_command.go | 296 ++++++++++++++++++++ tool/tctl/common/autoupdate_command_test.go | 118 ++++++++ 
tool/tctl/common/cmds.go | 1 + tool/tctl/common/helpers_test.go | 3 +- 4 files changed, 416 insertions(+), 2 deletions(-) create mode 100644 tool/tctl/common/autoupdate_command.go create mode 100644 tool/tctl/common/autoupdate_command_test.go diff --git a/tool/tctl/common/autoupdate_command.go b/tool/tctl/common/autoupdate_command.go new file mode 100644 index 0000000000000..c089010c091f4 --- /dev/null +++ b/tool/tctl/common/autoupdate_command.go @@ -0,0 +1,296 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package common + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/alecthomas/kingpin/v2" + "github.com/coreos/go-semver/semver" + "github.com/gravitational/trace" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/api/client/webclient" + autoupdatev1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1" + "github.com/gravitational/teleport/api/types/autoupdate" + "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/service/servicecfg" + "github.com/gravitational/teleport/lib/utils" + commonclient "github.com/gravitational/teleport/tool/tctl/common/client" + tctlcfg "github.com/gravitational/teleport/tool/tctl/common/config" +) + +// maxRetries is the default number of RPC call retries to prevent parallel create/update errors. +const maxRetries = 3 + +// AutoUpdateCommand implements the `tctl autoupdate` command for managing +// autoupdate process for tools and agents. +type AutoUpdateCommand struct { + app *kingpin.Application + ccf *tctlcfg.GlobalCLIFlags + + targetCmd *kingpin.CmdClause + enableCmd *kingpin.CmdClause + disableCmd *kingpin.CmdClause + statusCmd *kingpin.CmdClause + + toolsTargetVersion string + proxy string + format string + + clear bool + + // stdout allows to switch standard output source for resource command. Used in tests. + stdout io.Writer +} + +// Initialize allows AutoUpdateCommand to plug itself into the CLI parser. 
+func (c *AutoUpdateCommand) Initialize(app *kingpin.Application, ccf *tctlcfg.GlobalCLIFlags, _ *servicecfg.Config) { + c.app = app + c.ccf = ccf + autoUpdateCmd := app.Command("autoupdate", "Manage auto update configuration.") + + clientToolsCmd := autoUpdateCmd.Command("client-tools", "Manage client tools auto update configuration.") + + c.statusCmd = clientToolsCmd.Command("status", "Prints if the client tools updates are enabled/disabled, and the target version in specified format.") + c.statusCmd.Flag("proxy", "Address of the Teleport proxy. When defined this address will be used to retrieve client tools auto update configuration.").StringVar(&c.proxy) + c.statusCmd.Flag("format", "Output format: 'yaml' or 'json'").Default(teleport.YAML).StringVar(&c.format) + + c.enableCmd = clientToolsCmd.Command("enable", "Enables client tools auto updates. Clients will be told to update to the target version.") + c.disableCmd = clientToolsCmd.Command("disable", "Disables client tools auto updates. Clients will not be told to update to the target version.") + + c.targetCmd = clientToolsCmd.Command("target", "Sets the client tools target version. This command is not supported on Teleport Cloud.") + c.targetCmd.Arg("version", "Client tools target version. Clients will be told to update to this version.").StringVar(&c.toolsTargetVersion) + c.targetCmd.Flag("clear", "removes the target version, Teleport will default to its current proxy version.").BoolVar(&c.clear) + + if c.stdout == nil { + c.stdout = os.Stdout + } +} + +// TryRun takes the CLI command as an argument and executes it. 
+func (c *AutoUpdateCommand) TryRun(ctx context.Context, cmd string, clientFunc commonclient.InitFunc) (match bool, err error) { + var commandFunc func(ctx context.Context, client *authclient.Client) error + switch { + case cmd == c.targetCmd.FullCommand(): + commandFunc = c.TargetVersion + case cmd == c.enableCmd.FullCommand(): + commandFunc = c.SetModeCommand(true) + case cmd == c.disableCmd.FullCommand(): + commandFunc = c.SetModeCommand(false) + case c.proxy == "" && cmd == c.statusCmd.FullCommand(): + commandFunc = c.Status + case c.proxy != "" && cmd == c.statusCmd.FullCommand(): + err = c.StatusByProxy(ctx) + return true, trace.Wrap(err) + default: + return false, nil + } + + client, closeFn, err := clientFunc(ctx) + if err != nil { + return false, trace.Wrap(err) + } + err = commandFunc(ctx, client) + closeFn(ctx) + + return true, trace.Wrap(err) +} + +// TargetVersion creates or updates AutoUpdateVersion resource with client tools target version. +func (c *AutoUpdateCommand) TargetVersion(ctx context.Context, client *authclient.Client) error { + var err error + switch { + case c.clear: + err = c.clearTargetVersion(ctx, client) + case c.toolsTargetVersion != "": + // For parallel requests where we attempt to create a resource simultaneously, retries should be implemented. + // The same approach applies to updates if the resource has been deleted during the process. + // Second create request must return `AlreadyExists` error, update for deleted resource `NotFound` error. + for i := 0; i < maxRetries; i++ { + err = c.setTargetVersion(ctx, client) + if err == nil { + break + } + if !trace.IsNotFound(err) && !trace.IsAlreadyExists(err) { + return trace.Wrap(err) + } + } + } + return trace.Wrap(err) +} + +// SetModeCommand returns a command to enable or disable client tools auto-updates in the cluster. 
+func (c *AutoUpdateCommand) SetModeCommand(enabled bool) func(ctx context.Context, client *authclient.Client) error { + return func(ctx context.Context, client *authclient.Client) error { + // For parallel requests where we attempt to create a resource simultaneously, retries should be implemented. + // The same approach applies to updates if the resource has been deleted during the process. + // Second create request must return `AlreadyExists` error, update for deleted resource `NotFound` error. + for i := 0; i < maxRetries; i++ { + err := c.setMode(ctx, client, enabled) + if err == nil { + break + } + if !trace.IsNotFound(err) && !trace.IsAlreadyExists(err) { + return trace.Wrap(err) + } + } + return nil + } +} + +// getResponse is structure for formatting the client tools auto update response. +type getResponse struct { + Mode string `json:"mode"` + TargetVersion string `json:"target_version"` +} + +// Status makes request to auth service to fetch client tools auto update version and mode. +func (c *AutoUpdateCommand) Status(ctx context.Context, client *authclient.Client) error { + var response getResponse + config, err := client.GetAutoUpdateConfig(ctx) + if err != nil && !trace.IsNotFound(err) { + return trace.Wrap(err) + } + if config != nil && config.Spec.Tools != nil { + response.Mode = config.Spec.Tools.Mode + } + + version, err := client.GetAutoUpdateVersion(ctx) + if err != nil && !trace.IsNotFound(err) { + return trace.Wrap(err) + } + if version != nil && version.Spec.Tools != nil { + response.TargetVersion = version.Spec.Tools.TargetVersion + } + + return c.printResponse(response) +} + +// StatusByProxy makes request to `webapi/find` endpoint to fetch tools auto update version and mode +// without authentication. 
+func (c *AutoUpdateCommand) StatusByProxy(ctx context.Context) error { + find, err := webclient.Find(&webclient.Config{ + Context: ctx, + ProxyAddr: c.proxy, + Insecure: c.ccf.Insecure, + }) + if err != nil { + return trace.Wrap(err) + } + mode := autoupdate.ToolsUpdateModeDisabled + if find.AutoUpdate.ToolsAutoUpdate { + mode = autoupdate.ToolsUpdateModeEnabled + } + return c.printResponse(getResponse{ + TargetVersion: find.AutoUpdate.ToolsVersion, + Mode: mode, + }) +} + +func (c *AutoUpdateCommand) setMode(ctx context.Context, client *authclient.Client, enabled bool) error { + setMode := client.UpdateAutoUpdateConfig + config, err := client.GetAutoUpdateConfig(ctx) + if trace.IsNotFound(err) { + if config, err = autoupdate.NewAutoUpdateConfig(&autoupdatev1pb.AutoUpdateConfigSpec{}); err != nil { + return trace.Wrap(err) + } + setMode = client.CreateAutoUpdateConfig + } else if err != nil { + return trace.Wrap(err) + } + + if config.Spec.Tools == nil { + config.Spec.Tools = &autoupdatev1pb.AutoUpdateConfigSpecTools{} + } + + config.Spec.Tools.Mode = autoupdate.ToolsUpdateModeDisabled + if enabled { + config.Spec.Tools.Mode = autoupdate.ToolsUpdateModeEnabled + } + if _, err := setMode(ctx, config); err != nil { + return trace.Wrap(err) + } + fmt.Fprintln(c.stdout, "client tools auto update mode has been changed") + + return nil +} + +func (c *AutoUpdateCommand) setTargetVersion(ctx context.Context, client *authclient.Client) error { + if _, err := semver.NewVersion(c.toolsTargetVersion); err != nil { + return trace.WrapWithMessage(err, "not semantic version") + } + setTargetVersion := client.UpdateAutoUpdateVersion + version, err := client.GetAutoUpdateVersion(ctx) + if trace.IsNotFound(err) { + if version, err = autoupdate.NewAutoUpdateVersion(&autoupdatev1pb.AutoUpdateVersionSpec{}); err != nil { + return trace.Wrap(err) + } + setTargetVersion = client.CreateAutoUpdateVersion + } else if err != nil { + return trace.Wrap(err) + } + if version.Spec.Tools == nil 
{ + version.Spec.Tools = &autoupdatev1pb.AutoUpdateVersionSpecTools{} + } + if version.Spec.Tools.TargetVersion != c.toolsTargetVersion { + version.Spec.Tools.TargetVersion = c.toolsTargetVersion + if _, err := setTargetVersion(ctx, version); err != nil { + return trace.Wrap(err) + } + fmt.Fprintln(c.stdout, "client tools auto update target version has been set") + } + return nil +} + +func (c *AutoUpdateCommand) clearTargetVersion(ctx context.Context, client *authclient.Client) error { + version, err := client.GetAutoUpdateVersion(ctx) + if trace.IsNotFound(err) { + return nil + } else if err != nil { + return trace.Wrap(err) + } + if version.Spec.Tools != nil { + version.Spec.Tools = nil + if _, err := client.UpdateAutoUpdateVersion(ctx, version); err != nil { + return trace.Wrap(err) + } + fmt.Fprintln(c.stdout, "client tools auto update target version has been cleared") + } + return nil +} + +func (c *AutoUpdateCommand) printResponse(response getResponse) error { + switch c.format { + case teleport.JSON: + if err := utils.WriteJSON(c.stdout, response); err != nil { + return trace.Wrap(err) + } + case teleport.YAML: + if err := utils.WriteYAML(c.stdout, response); err != nil { + return trace.Wrap(err) + } + default: + return trace.BadParameter("unsupported output format %s, supported values are %s and %s", c.format, teleport.JSON, teleport.YAML) + } + return nil +} diff --git a/tool/tctl/common/autoupdate_command_test.go b/tool/tctl/common/autoupdate_command_test.go new file mode 100644 index 0000000000000..31d2782fbc335 --- /dev/null +++ b/tool/tctl/common/autoupdate_command_test.go @@ -0,0 +1,118 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package common + +import ( + "bytes" + "context" + "testing" + + "github.com/gravitational/trace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/gravitational/teleport/api/breaker" + "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/service/servicecfg" + "github.com/gravitational/teleport/lib/utils" + tctlcfg "github.com/gravitational/teleport/tool/tctl/common/config" + "github.com/gravitational/teleport/tool/teleport/testenv" +) + +// TestClientToolsAutoUpdateCommands verifies all commands related to client auto updates, by +// enabling/disabling auto update, setting the target version and retrieve it. +func TestClientToolsAutoUpdateCommands(t *testing.T) { + ctx := context.Background() + log := utils.NewSlogLoggerForTests() + process := testenv.MakeTestServer(t, testenv.WithLogger(log)) + authClient := testenv.MakeDefaultAuthClient(t, process) + + // Check that AutoUpdateConfig and AutoUpdateVersion are not created. + _, err := authClient.GetAutoUpdateConfig(ctx) + require.True(t, trace.IsNotFound(err)) + _, err = authClient.GetAutoUpdateVersion(ctx) + require.True(t, trace.IsNotFound(err)) + + // Enable client tools auto updates to check that AutoUpdateConfig resource is modified. 
+ _, err = runAutoUpdateCommand(t, authClient, []string{"client-tools", "enable"}) + require.NoError(t, err) + + config, err := authClient.GetAutoUpdateConfig(ctx) + require.NoError(t, err) + assert.Equal(t, "enabled", config.Spec.Tools.Mode) + + // Disable client tools auto updates to check that AutoUpdateConfig resource is modified. + _, err = runAutoUpdateCommand(t, authClient, []string{"client-tools", "disable"}) + require.NoError(t, err) + + config, err = authClient.GetAutoUpdateConfig(ctx) + require.NoError(t, err) + assert.Equal(t, "disabled", config.Spec.Tools.Mode) + + // Set target version for client tools auto updates. + _, err = runAutoUpdateCommand(t, authClient, []string{"client-tools", "target", "1.2.3"}) + require.NoError(t, err) + + version, err := authClient.GetAutoUpdateVersion(ctx) + require.NoError(t, err) + assert.Equal(t, "1.2.3", version.Spec.Tools.TargetVersion) + + getBuf, err := runAutoUpdateCommand(t, authClient, []string{"client-tools", "status", "--format=json"}) + require.NoError(t, err) + response := mustDecodeJSON[getResponse](t, getBuf) + assert.Equal(t, "1.2.3", response.TargetVersion) + assert.Equal(t, "disabled", response.Mode) + + // Make same request with proxy flag to read command expecting the same + // response from `webapi/find` endpoint. + proxy, err := process.ProxyWebAddr() + require.NoError(t, err) + getProxyBuf, err := runAutoUpdateCommand(t, authClient, []string{"client-tools", "status", "--proxy=" + proxy.Addr, "--format=json"}) + require.NoError(t, err) + response = mustDecodeJSON[getResponse](t, getProxyBuf) + assert.Equal(t, "1.2.3", response.TargetVersion) + assert.Equal(t, "disabled", response.Mode) + + // Set clear flag for the target version update to check that it is going to be reset. 
+ _, err = runAutoUpdateCommand(t, authClient, []string{"client-tools", "target", "--clear"}) + require.NoError(t, err) + version, err = authClient.GetAutoUpdateVersion(ctx) + require.NoError(t, err) + assert.Nil(t, version.Spec.Tools) +} + +func runAutoUpdateCommand(t *testing.T, client *authclient.Client, args []string) (*bytes.Buffer, error) { + var stdoutBuff bytes.Buffer + command := &AutoUpdateCommand{ + stdout: &stdoutBuff, + } + + cfg := servicecfg.MakeDefaultConfig() + cfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() + app := utils.InitCLIParser("tctl", GlobalHelpString) + command.Initialize(app, &tctlcfg.GlobalCLIFlags{Insecure: true}, cfg) + + selectedCmd, err := app.Parse(append([]string{"autoupdate"}, args...)) + require.NoError(t, err) + + _, err = command.TryRun(context.Background(), selectedCmd, func(ctx context.Context) (*authclient.Client, func(context.Context), error) { + return client, func(context.Context) {}, nil + }) + return &stdoutBuff, err +} diff --git a/tool/tctl/common/cmds.go b/tool/tctl/common/cmds.go index 4b9745ac38a10..2cd7b7a579802 100644 --- a/tool/tctl/common/cmds.go +++ b/tool/tctl/common/cmds.go @@ -66,5 +66,6 @@ func Commands() []CLICommand { &webauthnwinCommand{}, &touchIDCommand{}, &TerraformCommand{}, + &AutoUpdateCommand{}, } } diff --git a/tool/tctl/common/helpers_test.go b/tool/tctl/common/helpers_test.go index d9649391427a7..0cf773852c96f 100644 --- a/tool/tctl/common/helpers_test.go +++ b/tool/tctl/common/helpers_test.go @@ -75,8 +75,7 @@ func runCommand(t *testing.T, client *authclient.Client, cmd cliCommand, args [] selectedCmd, err := app.Parse(args) require.NoError(t, err) - ctx := context.Background() - _, err = cmd.TryRun(ctx, selectedCmd, func(ctx context.Context) (*authclient.Client, func(context.Context), error) { + _, err = cmd.TryRun(context.Background(), selectedCmd, func(ctx context.Context) (*authclient.Client, func(context.Context), error) { return client, func(context.Context) {}, nil }) return 
err From 1bc68f7d198d778797f31a2a97efb37de8f17f79 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:14:31 -0500 Subject: [PATCH 10/45] Convert logging in build.assets/tooling to use slog (#50805) --- .../cmd/protoc-gen-eventschema/debug.go | 7 ++++--- .../cmd/protoc-gen-eventschema/main.go | 14 ++++++++------ .../tooling/cmd/render-helm-ref/main.go | 19 ++++++++++++------- build.assets/tooling/go.mod | 2 -- build.assets/tooling/go.sum | 5 ----- 5 files changed, 24 insertions(+), 23 deletions(-) diff --git a/build.assets/tooling/cmd/protoc-gen-eventschema/debug.go b/build.assets/tooling/cmd/protoc-gen-eventschema/debug.go index 575dbe83e17a1..4da934ce4e019 100644 --- a/build.assets/tooling/cmd/protoc-gen-eventschema/debug.go +++ b/build.assets/tooling/cmd/protoc-gen-eventschema/debug.go @@ -25,14 +25,15 @@ package main // inspect what is happening inside the plugin. import ( + "context" "io" + "log/slog" "os" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/protoc-gen-gogo/generator" plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" ) const pluginInputPathEnvironment = "TELEPORT_PROTOC_READ_FILE" @@ -40,10 +41,10 @@ const pluginInputPathEnvironment = "TELEPORT_PROTOC_READ_FILE" func readRequest() (*plugin.CodeGeneratorRequest, error) { inputPath := os.Getenv(pluginInputPathEnvironment) if inputPath == "" { - log.Error(trace.BadParameter("When built with the 'debug' tag, the input path must be set through the environment variable: %s", pluginInputPathEnvironment)) + slog.ErrorContext(context.Background(), "When built with the 'debug' tag, the input path must be set through the TELEPORT_PROTOC_READ_FILE environment variable") os.Exit(-1) } - log.Infof("This is a debug build, the protoc request is read from the file: '%s'", inputPath) + slog.InfoContext(context.Background(), "This is a debug build, the protoc request is 
read from provided file", "file", inputPath) req, err := readRequestFromFile(inputPath) if err != nil { diff --git a/build.assets/tooling/cmd/protoc-gen-eventschema/main.go b/build.assets/tooling/cmd/protoc-gen-eventschema/main.go index 8480ad31a7594..a2d2210e0386a 100644 --- a/build.assets/tooling/cmd/protoc-gen-eventschema/main.go +++ b/build.assets/tooling/cmd/protoc-gen-eventschema/main.go @@ -19,21 +19,23 @@ package main import ( + "context" + "log/slog" "os" - - log "github.com/sirupsen/logrus" ) func main() { - log.SetLevel(log.DebugLevel) - log.SetOutput(os.Stderr) + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})) + slog.SetDefault(logger) + + ctx := context.Background() req, err := readRequest() if err != nil { - log.WithError(err).Error("Failed to read request") + logger.ErrorContext(ctx, "Failed to read request", "error", err) os.Exit(-1) } if err := handleRequest(req); err != nil { - log.WithError(err).Error("Failed to generate schema") + logger.ErrorContext(ctx, "Failed to generate schema", "error", err) os.Exit(-1) } } diff --git a/build.assets/tooling/cmd/render-helm-ref/main.go b/build.assets/tooling/cmd/render-helm-ref/main.go index 2026c2147672a..5cfa13b35ebeb 100644 --- a/build.assets/tooling/cmd/render-helm-ref/main.go +++ b/build.assets/tooling/cmd/render-helm-ref/main.go @@ -20,15 +20,16 @@ package main import ( "bufio" + "context" "encoding/json" "flag" "fmt" + "log/slog" "os" "regexp" "strings" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" "helm.sh/helm/v3/pkg/chart/loader" ) @@ -52,14 +53,15 @@ func main() { flag.StringVar(&outputPath, "output", "-", "Path of the generated markdown reference, '-' means stdout.") flag.Parse() + ctx := context.Background() if chartPath == "" { - log.Error(trace.BadParameter("chart path must be specified")) + slog.ErrorContext(ctx, "chart path must be specified") os.Exit(1) } reference, err := parseAndRender(chartPath) if 
err != nil { - log.Errorf("failed parsing chart and rendering reference: %s", err) + slog.ErrorContext(ctx, "failed parsing chart and rendering reference", "error", err) os.Exit(1) } @@ -69,10 +71,10 @@ func main() { } err = os.WriteFile(outputPath, reference, 0o644) if err != nil { - log.Errorf("failed writing file: %s", err) + slog.ErrorContext(ctx, "failed writing file", "error", err) os.Exit(1) } - log.Infof("File %s successfully written", outputPath) + slog.InfoContext(ctx, "File successfully written", "file_path", outputPath) } func parseAndRender(chartPath string) ([]byte, error) { @@ -106,7 +108,10 @@ func parseAndRender(chartPath string) ([]byte, error) { if value.Kind != "" && value.Default == "" { defaultValue, err := getDefaultForValue(value.Name, chrt.Values) if err != nil { - log.Warnf("failed to get default for value %s, error: %s", value.Name, err) + slog.WarnContext(context.Background(), "failed to look up default value", + "value", value.Name, + "error", err, + ) } else { value.Default = string(defaultValue) } @@ -227,7 +232,7 @@ func cleanLine(line string) string { return "" } if line2[0] != '#' { - log.Warnf("Misformatted line: %s", line) + slog.WarnContext(context.Background(), "Misformatted line", "line", line) return "" } return line2[2:] diff --git a/build.assets/tooling/go.mod b/build.assets/tooling/go.mod index e87bf5d8680d4..d726a7c08fd94 100644 --- a/build.assets/tooling/go.mod +++ b/build.assets/tooling/go.mod @@ -10,7 +10,6 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/go-github/v41 v41.0.0 github.com/gravitational/trace v1.4.0 - github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.10.0 github.com/waigani/diffparser v0.0.0-20190828052634-7391f219313d golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 @@ -48,7 +47,6 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect 
golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.26.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/build.assets/tooling/go.sum b/build.assets/tooling/go.sum index 7e613ad03bde2..7391483f6ceb1 100644 --- a/build.assets/tooling/go.sum +++ b/build.assets/tooling/go.sum @@ -881,8 +881,6 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= @@ -1183,7 +1181,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1197,8 +1194,6 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= From 62dbb2cf65b3fa8c304b3d136d665ccfcb54b661 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 8 Jan 2025 11:42:37 -0600 Subject: [PATCH 11/45] Display View button if user can list roles (#50872) This fixes a regression that caused users who could list/read roles but NOT edit them to be unable to view the details of the role. This PR will change the text to "view details" if the user can _only_ view and not edit. The edit button in the role editor already has logic to prevent and tell the user they do not have permissions to edit, so no further changes need to be made there. 
--- .../teleport/src/Roles/RoleList/RoleList.tsx | 11 +++- .../teleport/src/Roles/Roles.test.tsx | 50 ++++++++++++++++--- 2 files changed, 51 insertions(+), 10 deletions(-) diff --git a/web/packages/teleport/src/Roles/RoleList/RoleList.tsx b/web/packages/teleport/src/Roles/RoleList/RoleList.tsx index baf15b596c534..3fec1ddd63867 100644 --- a/web/packages/teleport/src/Roles/RoleList/RoleList.tsx +++ b/web/packages/teleport/src/Roles/RoleList/RoleList.tsx @@ -39,6 +39,7 @@ export function RoleList({ serversidePagination: SeversidePagination; rolesAcl: Access; }) { + const canView = rolesAcl.list && rolesAcl.read; const canEdit = rolesAcl.edit; const canDelete = rolesAcl.remove; @@ -72,6 +73,7 @@ export function RoleList({ altKey: 'options-btn', render: (role: RoleResource) => ( onEdit(role.id)} @@ -87,18 +89,23 @@ export function RoleList({ } const ActionCell = (props: { + canView: boolean; canEdit: boolean; canDelete: boolean; onEdit(): void; onDelete(): void; }) => { - if (!(props.canEdit || props.canDelete)) { + if (!(props.canView || props.canDelete)) { return ; } return ( - {props.canEdit && Edit} + {props.canView && ( + + {props.canEdit ? 
'Edit' : 'View Details'} + + )} {props.canDelete && ( Delete )} diff --git a/web/packages/teleport/src/Roles/Roles.test.tsx b/web/packages/teleport/src/Roles/Roles.test.tsx index 1a4338cac036e..071c1e10eea3d 100644 --- a/web/packages/teleport/src/Roles/Roles.test.tsx +++ b/web/packages/teleport/src/Roles/Roles.test.tsx @@ -120,13 +120,13 @@ describe('Roles list', () => { expect(menuItems).toHaveLength(2); }); - test('hides edit button if no access', async () => { + test('hides view/edit button if no access', async () => { const ctx = createTeleportContext(); const testState = { ...defaultState, rolesAcl: { ...defaultState.rolesAcl, - edit: false, + list: false, }, }; @@ -147,12 +147,15 @@ describe('Roles list', () => { fireEvent.click(optionsButton); const menuItems = screen.queryAllByRole('menuitem'); expect(menuItems).toHaveLength(1); - expect(menuItems.every(item => item.textContent.includes('Edit'))).not.toBe( - true - ); + expect( + menuItems.every( + item => + item.textContent.includes('View') || item.textContent.includes('Edit') + ) + ).not.toBe(true); }); - test('hides delete button if no access', async () => { + test('hides delete button if user does not have permission to delete', async () => { const ctx = createTeleportContext(); const testState = { ...defaultState, @@ -184,12 +187,14 @@ describe('Roles list', () => { ).not.toBe(true); }); - test('hides Options button if no permissions to edit or delete', async () => { + test('displays Options button if user has permission to list/read roles', async () => { const ctx = createTeleportContext(); const testState = { ...defaultState, rolesAcl: { - ...defaultState.rolesAcl, + list: true, + read: true, + create: false, remove: false, edit: false, }, @@ -203,6 +208,35 @@ describe('Roles list', () => { ); + await waitFor(() => { + expect(screen.getByText('cool-role')).toBeInTheDocument(); + }); + const optionsButton = screen.getByRole('button', { name: /options/i }); + fireEvent.click(optionsButton); + const 
menuItems = screen.queryAllByRole('menuitem'); + expect(menuItems).toHaveLength(1); + expect(menuItems[0]).toHaveTextContent('View'); + }); + + test('hides Options button if no permissions to view or delete', async () => { + const ctx = createTeleportContext(); + const testState = { + ...defaultState, + rolesAcl: { + ...defaultState.rolesAcl, + remove: false, + list: false, + }, + }; + + render( + + + + + + ); + await waitFor(() => { expect(screen.getByText('cool-role')).toBeInTheDocument(); }); From 20828a217d7c7acd295683cec16ce2732c108df2 Mon Sep 17 00:00:00 2001 From: Gavin Frazar Date: Wed, 8 Jan 2025 10:19:09 -0800 Subject: [PATCH 12/45] Simplify awsconfig loading (#50809) This replaces awsconfig.WithIntegrationCredentialProvider option with the awsconfig.WithOIDCIntegrationClient option. This solves a chicken/egg problem with AWS config loading - callers no longer need to load AWS config (to create a credential provider) to load AWS config. The OIDCIntegrationClient interface is also much simpler to implement. This also adds default option overrides when creating an awsconfig.Cache. For now, this is used to add an OIDCIntegrationClient when creating the cache so that dependent callers don't have to. 
--- lib/cloud/awsconfig/awsconfig.go | 129 +++++++++--- lib/cloud/awsconfig/awsconfig_test.go | 194 ++++++++++++------ lib/cloud/awsconfig/cache.go | 36 +++- lib/cloud/mocks/aws_config.go | 35 +++- lib/cloud/mocks/aws_sts.go | 19 +- lib/integrations/awsoidc/clientsv1.go | 3 - lib/srv/discovery/discovery.go | 34 +-- lib/srv/discovery/discovery_test.go | 45 ++-- lib/srv/discovery/fetchers/db/aws.go | 3 - lib/srv/discovery/fetchers/db/aws_redshift.go | 1 - lib/srv/discovery/fetchers/db/db.go | 22 +- 11 files changed, 349 insertions(+), 172 deletions(-) diff --git a/lib/cloud/awsconfig/awsconfig.go b/lib/cloud/awsconfig/awsconfig.go index 8be00483f4012..7b1cabe5ffe75 100644 --- a/lib/cloud/awsconfig/awsconfig.go +++ b/lib/cloud/awsconfig/awsconfig.go @@ -28,6 +28,7 @@ import ( "github.com/gravitational/trace" "go.opentelemetry.io/otel" + "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/modules" ) @@ -43,12 +44,25 @@ const ( credentialsSourceIntegration ) -// IntegrationSessionProviderFunc defines a function that creates a credential provider from a region and an integration. -// This is used to generate aws configs for clients that must use an integration instead of ambient credentials. -type IntegrationCredentialProviderFunc func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) +// OIDCIntegrationClient is an interface that indicates which APIs are +// required to generate an AWS OIDC integration token. +type OIDCIntegrationClient interface { + // GetIntegration returns the specified integration resource. + GetIntegration(ctx context.Context, name string) (types.Integration, error) -// AssumeRoleClientProviderFunc provides an AWS STS assume role API client. -type AssumeRoleClientProviderFunc func(aws.Config) stscreds.AssumeRoleAPIClient + // GenerateAWSOIDCToken generates a token to be used to execute an AWS OIDC + // Integration action. 
+ GenerateAWSOIDCToken(ctx context.Context, integrationName string) (string, error) +} + +// STSClient is a subset of the AWS STS API. +type STSClient interface { + stscreds.AssumeRoleAPIClient + stscreds.AssumeRoleWithWebIdentityAPIClient +} + +// STSClientProviderFunc provides an AWS STS assume role API client. +type STSClientProviderFunc func(aws.Config) STSClient // AssumeRole is an AWS role to assume, optionally with an external ID. type AssumeRole struct { @@ -68,14 +82,16 @@ type options struct { credentialsSource credentialsSource // integration is the name of the integration to be used to fetch the credentials. integration string - // integrationCredentialsProvider is the integration credential provider to use. - integrationCredentialsProvider IntegrationCredentialProviderFunc + // oidcIntegrationClient provides APIs to generate AWS OIDC tokens, which + // can then be exchanged for IAM credentials. + // Required if integration credentials are requested. + oidcIntegrationClient OIDCIntegrationClient // customRetryer is a custom retryer to use for the config. customRetryer func() aws.Retryer // maxRetries is the maximum number of retries to use for the config. maxRetries *int - // assumeRoleClientProvider sets the STS assume role client provider func. - assumeRoleClientProvider AssumeRoleClientProviderFunc + // stsClientProvider sets the STS assume role client provider func. 
+ stsClientProvider STSClientProviderFunc } func buildOptions(optFns ...OptionsFn) (*options, error) { @@ -99,6 +115,9 @@ func (o *options) checkAndSetDefaults() error { if o.integration == "" { return trace.BadParameter("missing integration name") } + if o.oidcIntegrationClient == nil { + return trace.BadParameter("missing AWS OIDC integration client") + } default: return trace.BadParameter("missing credentials source (ambient or integration)") } @@ -106,8 +125,8 @@ func (o *options) checkAndSetDefaults() error { return trace.BadParameter("role chain contains more than 2 roles") } - if o.assumeRoleClientProvider == nil { - o.assumeRoleClientProvider = func(cfg aws.Config) stscreds.AssumeRoleAPIClient { + if o.stsClientProvider == nil { + o.stsClientProvider = func(cfg aws.Config) STSClient { return sts.NewFromConfig(cfg, func(o *sts.Options) { o.TracerProvider = smithyoteltracing.Adapt(otel.GetTracerProvider()) }) @@ -175,18 +194,17 @@ func WithAmbientCredentials() OptionsFn { } } -// WithIntegrationCredentialProvider sets the integration credential provider. -func WithIntegrationCredentialProvider(cred IntegrationCredentialProviderFunc) OptionsFn { +// WithSTSClientProvider sets the STS API client factory func. +func WithSTSClientProvider(fn STSClientProviderFunc) OptionsFn { return func(options *options) { - options.integrationCredentialsProvider = cred + options.stsClientProvider = fn } } -// WithAssumeRoleClientProviderFunc sets the STS API client factory func used to -// assume roles. -func WithAssumeRoleClientProviderFunc(fn AssumeRoleClientProviderFunc) OptionsFn { +// WithOIDCIntegrationClient sets the OIDC integration client. 
+func WithOIDCIntegrationClient(c OIDCIntegrationClient) OptionsFn { return func(options *options) { - options.assumeRoleClientProvider = fn + options.oidcIntegrationClient = c } } @@ -202,7 +220,7 @@ func GetConfig(ctx context.Context, region string, optFns ...OptionsFn) (aws.Con if err != nil { return aws.Config{}, trace.Wrap(err) } - return getConfigForRoleChain(ctx, cfg, opts.assumeRoles, opts.assumeRoleClientProvider) + return getConfigForRoleChain(ctx, cfg, opts.assumeRoles, opts.stsClientProvider) } // loadDefaultConfig loads a new config. @@ -217,6 +235,7 @@ func buildConfigOptions(region string, cred aws.CredentialsProvider, opts *optio config.WithDefaultRegion(defaultRegion), config.WithRegion(region), config.WithCredentialsProvider(cred), + config.WithCredentialsCacheOptions(awsCredentialsCacheOptions), } if modules.GetModules().IsBoringBinary() { configOpts = append(configOpts, config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled)) @@ -232,27 +251,35 @@ func buildConfigOptions(region string, cred aws.CredentialsProvider, opts *optio // getBaseConfig returns an AWS config without assuming any roles. 
func getBaseConfig(ctx context.Context, region string, opts *options) (aws.Config, error) { - var cred aws.CredentialsProvider + slog.DebugContext(ctx, "Initializing AWS config from default credential chain", + "region", region, + ) + cfg, err := loadDefaultConfig(ctx, region, nil, opts) + if err != nil { + return aws.Config{}, trace.Wrap(err) + } + if opts.credentialsSource == credentialsSourceIntegration { - if opts.integrationCredentialsProvider == nil { - return aws.Config{}, trace.BadParameter("missing aws integration credential provider") + slog.DebugContext(ctx, "Initializing AWS config with OIDC integration credentials", + "region", region, + "integration", opts.integration, + ) + provider := &integrationCredentialsProvider{ + OIDCIntegrationClient: opts.oidcIntegrationClient, + stsClt: opts.stsClientProvider(cfg), + integrationName: opts.integration, } - - slog.DebugContext(ctx, "Initializing AWS config with integration", "region", region, "integration", opts.integration) - var err error - cred, err = opts.integrationCredentialsProvider(ctx, region, opts.integration) + cc := aws.NewCredentialsCache(provider, awsCredentialsCacheOptions) + _, err := cc.Retrieve(ctx) if err != nil { return aws.Config{}, trace.Wrap(err) } - } else { - slog.DebugContext(ctx, "Initializing AWS config from default credential chain", "region", region) + cfg.Credentials = cc } - - cfg, err := loadDefaultConfig(ctx, region, cred, opts) - return cfg, trace.Wrap(err) + return cfg, nil } -func getConfigForRoleChain(ctx context.Context, cfg aws.Config, roles []AssumeRole, newCltFn AssumeRoleClientProviderFunc) (aws.Config, error) { +func getConfigForRoleChain(ctx context.Context, cfg aws.Config, roles []AssumeRole, newCltFn STSClientProviderFunc) (aws.Config, error) { for _, r := range roles { cfg.Credentials = getAssumeRoleProvider(ctx, newCltFn(cfg), r) } @@ -277,3 +304,41 @@ func getAssumeRoleProvider(ctx context.Context, clt stscreds.AssumeRoleAPIClient } }) } + +// 
staticIdentityToken provides itself as a JWT []byte token to implement +// [stscreds.IdentityTokenRetriever]. +type staticIdentityToken string + +// GetIdentityToken retrieves the JWT token. +func (t staticIdentityToken) GetIdentityToken() ([]byte, error) { + return []byte(t), nil +} + +// integrationCredentialsProvider provides AWS OIDC integration credentials. +type integrationCredentialsProvider struct { + OIDCIntegrationClient + stsClt STSClient + integrationName string +} + +// Retrieve provides [aws.Credentials] for an AWS OIDC integration. +func (p *integrationCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { + integration, err := p.GetIntegration(ctx, p.integrationName) + if err != nil { + return aws.Credentials{}, trace.Wrap(err) + } + spec := integration.GetAWSOIDCIntegrationSpec() + if spec == nil { + return aws.Credentials{}, trace.BadParameter("invalid integration subkind, expected awsoidc, got %s", integration.GetSubKind()) + } + token, err := p.GenerateAWSOIDCToken(ctx, p.integrationName) + if err != nil { + return aws.Credentials{}, trace.Wrap(err) + } + cred, err := stscreds.NewWebIdentityRoleProvider( + p.stsClt, + spec.RoleARN, + staticIdentityToken(token), + ).Retrieve(ctx) + return cred, trace.Wrap(err) +} diff --git a/lib/cloud/awsconfig/awsconfig_test.go b/lib/cloud/awsconfig/awsconfig_test.go index 3cb2c4eda3123..2de624fe86c54 100644 --- a/lib/cloud/awsconfig/awsconfig_test.go +++ b/lib/cloud/awsconfig/awsconfig_test.go @@ -24,20 +24,13 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/sts" ststypes "github.com/aws/aws-sdk-go-v2/service/sts/types" "github.com/gravitational/trace" "github.com/stretchr/testify/require" -) - -type mockCredentialProvider struct { - cred aws.Credentials -} -func (m *mockCredentialProvider) Retrieve(_ context.Context) (aws.Credentials, error) { - return m.cred, nil -} + 
"github.com/gravitational/teleport/api/types" +) type mockAssumeRoleAPIClient struct{} @@ -57,6 +50,18 @@ func (m *mockAssumeRoleAPIClient) AssumeRole(_ context.Context, params *sts.Assu }, nil } +func (m *mockAssumeRoleAPIClient) AssumeRoleWithWebIdentity(ctx context.Context, in *sts.AssumeRoleWithWebIdentityInput, _ ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) { + expiry := time.Now().Add(60 * time.Minute) + return &sts.AssumeRoleWithWebIdentityOutput{ + Credentials: &ststypes.Credentials{ + AccessKeyId: in.RoleArn, + SecretAccessKey: in.WebIdentityToken, + SessionToken: aws.String("token"), + Expiration: &expiry, + }, + }, nil +} + func TestGetConfigIntegration(t *testing.T) { t.Parallel() @@ -86,32 +91,100 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { dummyIntegration := "integration-test" dummyRegion := "test-region-123" - t.Run("without an integration credential provider, must return missing credential provider error", func(t *testing.T) { + awsOIDCIntegration, err := types.NewIntegrationAWSOIDC( + types.Metadata{Name: "integration-test"}, + &types.AWSOIDCIntegrationSpecV1{ + RoleARN: "arn:aws:sts::123456789012:role/TestRole", + }, + ) + require.NoError(t, err) + fakeIntegrationClt := fakeOIDCIntegrationClient{ + getIntegrationFn: func(context.Context, string) (types.Integration, error) { + return awsOIDCIntegration, nil + }, + getTokenFn: func(context.Context, string) (string, error) { + return "oidc-token", nil + }, + } + + stsClt := func(cfg aws.Config) STSClient { + return &mockAssumeRoleAPIClient{} + } + + t.Run("without an integration client, must return missing credential provider error", func(t *testing.T) { ctx := context.Background() _, err := provider.GetConfig(ctx, dummyRegion, WithCredentialsMaybeIntegration(dummyIntegration)) require.True(t, trace.IsBadParameter(err), "unexpected error: %v", err) - require.ErrorContains(t, err, "missing aws integration credential provider") + require.ErrorContains(t, 
err, "missing AWS OIDC integration client") + }) + + t.Run("with an integration client, must return integration fetch error", func(t *testing.T) { + ctx := context.Background() + + fakeIntegrationClt := fakeIntegrationClt + fakeIntegrationClt.getIntegrationFn = func(context.Context, string) (types.Integration, error) { + return nil, trace.NotFound("integration not found") + } + _, err := provider.GetConfig(ctx, dummyRegion, + WithCredentialsMaybeIntegration(dummyIntegration), + WithOIDCIntegrationClient(&fakeIntegrationClt), + WithSTSClientProvider(stsClt), + ) + require.Error(t, err) + require.ErrorContains(t, err, "integration not found") + }) + + t.Run("with an integration client, must check for AWS integration subkind", func(t *testing.T) { + ctx := context.Background() + + azureIntegration, err := types.NewIntegrationAzureOIDC( + types.Metadata{Name: "integration-test"}, + &types.AzureOIDCIntegrationSpecV1{ + TenantID: "abc", + ClientID: "123", + }, + ) + require.NoError(t, err) + fakeIntegrationClt := fakeIntegrationClt + fakeIntegrationClt.getIntegrationFn = func(context.Context, string) (types.Integration, error) { + return azureIntegration, nil + } + _, err = provider.GetConfig(ctx, dummyRegion, + WithCredentialsMaybeIntegration(dummyIntegration), + WithOIDCIntegrationClient(&fakeIntegrationClt), + WithSTSClientProvider(stsClt), + ) + require.Error(t, err) + require.ErrorContains(t, err, "invalid integration subkind") + }) + + t.Run("with an integration client, must return token generation errors", func(t *testing.T) { + ctx := context.Background() + fakeIntegrationClt := fakeIntegrationClt + fakeIntegrationClt.getTokenFn = func(context.Context, string) (string, error) { + return "", trace.BadParameter("failed to generate OIDC token") + } + _, err = provider.GetConfig(ctx, dummyRegion, + WithCredentialsMaybeIntegration(dummyIntegration), + WithOIDCIntegrationClient(&fakeIntegrationClt), + WithSTSClientProvider(stsClt), + ) + require.Error(t, err) + 
require.ErrorContains(t, err, "failed to generate OIDC token") }) - t.Run("with an integration credential provider, must return the credentials", func(t *testing.T) { + t.Run("with an integration client, must return the credentials", func(t *testing.T) { ctx := context.Background() cfg, err := provider.GetConfig(ctx, dummyRegion, WithCredentialsMaybeIntegration(dummyIntegration), - WithIntegrationCredentialProvider(func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) { - if region == dummyRegion && integration == dummyIntegration { - return &mockCredentialProvider{ - cred: aws.Credentials{ - SessionToken: "foo-bar", - }, - }, nil - } - return nil, trace.NotFound("no creds in region %q with integration %q", region, integration) - })) + WithOIDCIntegrationClient(&fakeIntegrationClt), + WithSTSClientProvider(stsClt), + ) require.NoError(t, err) creds, err := cfg.Credentials.Retrieve(ctx) require.NoError(t, err) - require.Equal(t, "foo-bar", creds.SessionToken) + require.Equal(t, "oidc-token", creds.SecretAccessKey) }) t.Run("with an integration credential provider assuming a role, must return assumed role credentials", func(t *testing.T) { @@ -119,23 +192,9 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { cfg, err := provider.GetConfig(ctx, dummyRegion, WithCredentialsMaybeIntegration(dummyIntegration), - WithIntegrationCredentialProvider(func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) { - if region == dummyRegion && integration == dummyIntegration { - return &mockCredentialProvider{ - cred: aws.Credentials{ - SessionToken: "foo-bar", - }, - }, nil - } - return nil, trace.NotFound("no creds in region %q with integration %q", region, integration) - }), + WithOIDCIntegrationClient(&fakeIntegrationClt), WithAssumeRole("roleA", "abc123"), - WithAssumeRoleClientProviderFunc(func(cfg aws.Config) stscreds.AssumeRoleAPIClient { - creds, err := 
cfg.Credentials.Retrieve(context.Background()) - require.NoError(t, err) - require.Equal(t, "foo-bar", creds.SessionToken) - return &mockAssumeRoleAPIClient{} - }), + WithSTSClientProvider(stsClt), ) require.NoError(t, err) creds, err := cfg.Credentials.Retrieve(ctx) @@ -148,25 +207,11 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { ctx := context.Background() _, err := provider.GetConfig(ctx, dummyRegion, WithCredentialsMaybeIntegration(dummyIntegration), - WithIntegrationCredentialProvider(func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) { - if region == dummyRegion && integration == dummyIntegration { - return &mockCredentialProvider{ - cred: aws.Credentials{ - SessionToken: "foo-bar", - }, - }, nil - } - return nil, trace.NotFound("no creds in region %q with integration %q", region, integration) - }), + WithOIDCIntegrationClient(&fakeIntegrationClt), WithAssumeRole("roleA", "abc123"), WithAssumeRole("roleB", "abc123"), WithAssumeRole("roleC", "abc123"), - WithAssumeRoleClientProviderFunc(func(cfg aws.Config) stscreds.AssumeRoleAPIClient { - creds, err := cfg.Credentials.Retrieve(context.Background()) - require.NoError(t, err) - require.Equal(t, "foo-bar", creds.SessionToken) - return &mockAssumeRoleAPIClient{} - }), + WithSTSClientProvider(stsClt), ) require.Error(t, err) require.ErrorContains(t, err, "role chain contains more than 2 roles") @@ -177,10 +222,8 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { _, err := provider.GetConfig(ctx, dummyRegion, WithCredentialsMaybeIntegration(""), - WithIntegrationCredentialProvider(func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) { - require.Fail(t, "this function should not be called") - return nil, nil - })) + WithOIDCIntegrationClient(&fakeOIDCIntegrationClient{unauth: true}), + ) require.NoError(t, err) }) @@ -189,10 +232,8 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { _, err := 
provider.GetConfig(ctx, dummyRegion, WithAmbientCredentials(), - WithIntegrationCredentialProvider(func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) { - require.Fail(t, "this function should not be called") - return nil, nil - })) + WithOIDCIntegrationClient(&fakeOIDCIntegrationClient{unauth: true}), + ) require.NoError(t, err) }) @@ -200,10 +241,8 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { ctx := context.Background() _, err := provider.GetConfig(ctx, dummyRegion, - WithIntegrationCredentialProvider(func(ctx context.Context, region, integration string) (aws.CredentialsProvider, error) { - require.Fail(t, "this function should not be called") - return nil, nil - })) + WithOIDCIntegrationClient(&fakeOIDCIntegrationClient{unauth: true}), + ) require.Error(t, err) require.ErrorContains(t, err, "missing credentials source") }) @@ -221,3 +260,24 @@ func TestNewCacheKey(t *testing.T) { `) require.Equal(t, want, got) } + +type fakeOIDCIntegrationClient struct { + unauth bool + + getIntegrationFn func(context.Context, string) (types.Integration, error) + getTokenFn func(context.Context, string) (string, error) +} + +func (f *fakeOIDCIntegrationClient) GetIntegration(ctx context.Context, name string) (types.Integration, error) { + if f.unauth { + return nil, trace.AccessDenied("unauthorized") + } + return f.getIntegrationFn(ctx, name) +} + +func (f *fakeOIDCIntegrationClient) GenerateAWSOIDCToken(ctx context.Context, integrationName string) (string, error) { + if f.unauth { + return "", trace.AccessDenied("unauthorized") + } + return f.getTokenFn(ctx, integrationName) +} diff --git a/lib/cloud/awsconfig/cache.go b/lib/cloud/awsconfig/cache.go index 3d664ba04c350..cdb315703212a 100644 --- a/lib/cloud/awsconfig/cache.go +++ b/lib/cloud/awsconfig/cache.go @@ -36,10 +36,23 @@ func awsCredentialsCacheOptions(opts *aws.CredentialsCacheOptions) { // role. 
type Cache struct { awsConfigCache *utils.FnCache + defaultOptions []OptionsFn +} + +// CacheOption is an option func for setting additional options when creating +// a new config cache. +type CacheOption func(*Cache) + +// WithDefaults is a [CacheOption] function that sets default [OptionsFn] to +// use when getting AWS config. +func WithDefaults(optFns ...OptionsFn) CacheOption { + return func(c *Cache) { + c.defaultOptions = optFns + } } // NewCache returns a new [Cache]. -func NewCache() (*Cache, error) { +func NewCache(optFns ...CacheOption) (*Cache, error) { c, err := utils.NewFnCache(utils.FnCacheConfig{ TTL: 15 * time.Minute, ReloadOnErr: true, @@ -47,14 +60,27 @@ func NewCache() (*Cache, error) { if err != nil { return nil, trace.Wrap(err) } - return &Cache{ + cache := &Cache{ awsConfigCache: c, - }, nil + } + for _, fn := range optFns { + fn(cache) + } + return cache, nil +} + +// withDefaultOptions prepends default options to the given option funcs, +// providing for default cache options and per-call options. +func (c *Cache) withDefaultOptions(optFns []OptionsFn) []OptionsFn { + if c.defaultOptions != nil { + return append(c.defaultOptions, optFns...) + } + return optFns } // GetConfig returns an [aws.Config] for the given region and options. func (c *Cache) GetConfig(ctx context.Context, region string, optFns ...OptionsFn) (aws.Config, error) { - opts, err := buildOptions(optFns...) + opts, err := buildOptions(c.withDefaultOptions(optFns)...) 
if err != nil { return aws.Config{}, trace.Wrap(err) } @@ -112,7 +138,7 @@ func (c *Cache) getConfigForRoleChain(ctx context.Context, cfg aws.Config, opts } credProvider, err := utils.FnCacheGet(ctx, c.awsConfigCache, cacheKey, func(ctx context.Context) (aws.CredentialsProvider, error) { - clt := opts.assumeRoleClientProvider(cfg) + clt := opts.stsClientProvider(cfg) credProvider := getAssumeRoleProvider(ctx, clt, r) cc := aws.NewCredentialsCache(credProvider, awsCredentialsCacheOptions, diff --git a/lib/cloud/mocks/aws_config.go b/lib/cloud/mocks/aws_config.go index 7edadf80a9e20..b52dfbd36d74a 100644 --- a/lib/cloud/mocks/aws_config.go +++ b/lib/cloud/mocks/aws_config.go @@ -22,12 +22,15 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/gravitational/trace" + "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/awsconfig" ) type AWSConfigProvider struct { - STSClient *STSClient + STSClient *STSClient + OIDCIntegrationClient awsconfig.OIDCIntegrationClient } func (f *AWSConfigProvider) GetConfig(ctx context.Context, region string, optFns ...awsconfig.OptionsFn) (aws.Config, error) { @@ -35,8 +38,32 @@ func (f *AWSConfigProvider) GetConfig(ctx context.Context, region string, optFns if stsClt == nil { stsClt = &STSClient{} } - optFns = append(optFns, awsconfig.WithAssumeRoleClientProviderFunc( - newAssumeRoleClientProviderFunc(stsClt), - )) + optFns = append(optFns, + awsconfig.WithOIDCIntegrationClient(f.OIDCIntegrationClient), + awsconfig.WithSTSClientProvider( + newAssumeRoleClientProviderFunc(stsClt), + ), + ) return awsconfig.GetConfig(ctx, region, optFns...) 
} + +type FakeOIDCIntegrationClient struct { + Unauth bool + + Integration types.Integration + Token string +} + +func (f *FakeOIDCIntegrationClient) GetIntegration(ctx context.Context, name string) (types.Integration, error) { + if f.Unauth { + return nil, trace.AccessDenied("unauthorized") + } + return f.Integration, nil +} + +func (f *FakeOIDCIntegrationClient) GenerateAWSOIDCToken(ctx context.Context, integrationName string) (string, error) { + if f.Unauth { + return "", trace.AccessDenied("unauthorized") + } + return f.Token, nil +} diff --git a/lib/cloud/mocks/aws_sts.go b/lib/cloud/mocks/aws_sts.go index 713de480ebf86..178a1259669a4 100644 --- a/lib/cloud/mocks/aws_sts.go +++ b/lib/cloud/mocks/aws_sts.go @@ -54,7 +54,20 @@ type STSClient struct { recordFn func(roleARN, externalID string) } -func (m *STSClient) AssumeRole(ctx context.Context, in *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) { +func (m *STSClient) AssumeRoleWithWebIdentity(ctx context.Context, in *sts.AssumeRoleWithWebIdentityInput, _ ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) { + m.record(aws.ToString(in.RoleArn), "") + expiry := time.Now().Add(60 * time.Minute) + return &sts.AssumeRoleWithWebIdentityOutput{ + Credentials: &ststypes.Credentials{ + AccessKeyId: in.RoleArn, + SecretAccessKey: aws.String("secret"), + SessionToken: aws.String("token"), + Expiration: &expiry, + }, + }, nil +} + +func (m *STSClient) AssumeRole(ctx context.Context, in *sts.AssumeRoleInput, _ ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) { // Retrieve credentials if we have a credential provider, so that all // assume-role providers in a role chain are triggered to call AssumeRole. 
if m.credentialProvider != nil { @@ -93,8 +106,8 @@ func (m *STSClient) record(roleARN, externalID string) { } } -func newAssumeRoleClientProviderFunc(base *STSClient) awsconfig.AssumeRoleClientProviderFunc { - return func(cfg aws.Config) stscreds.AssumeRoleAPIClient { +func newAssumeRoleClientProviderFunc(base *STSClient) awsconfig.STSClientProviderFunc { + return func(cfg aws.Config) awsconfig.STSClient { if cfg.Credentials != nil { if _, ok := cfg.Credentials.(*stscreds.AssumeRoleProvider); ok { // Create a new fake client linked to the old one. diff --git a/lib/integrations/awsoidc/clientsv1.go b/lib/integrations/awsoidc/clientsv1.go index 8c16f4c66156a..ae2e0be6a186b 100644 --- a/lib/integrations/awsoidc/clientsv1.go +++ b/lib/integrations/awsoidc/clientsv1.go @@ -44,9 +44,6 @@ type IntegrationTokenGenerator interface { // GetIntegration returns the specified integration resources. GetIntegration(ctx context.Context, name string) (types.Integration, error) - // GetProxies returns a list of registered proxies. - GetProxies() ([]types.Server, error) - // GenerateAWSOIDCToken generates a token to be used to execute an AWS OIDC Integration action. 
GenerateAWSOIDCToken(ctx context.Context, integration string) (string, error) } diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index 28690130d51a7..f37ba025d2450 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -224,7 +224,11 @@ kubernetes matchers are present.`) c.CloudClients = cloudClients } if c.AWSConfigProvider == nil { - provider, err := awsconfig.NewCache() + provider, err := awsconfig.NewCache( + awsconfig.WithDefaults( + awsconfig.WithOIDCIntegrationClient(c.AccessPoint), + ), + ) if err != nil { return trace.Wrap(err, "unable to create AWS config provider cache") } @@ -232,9 +236,8 @@ kubernetes matchers are present.`) } if c.AWSDatabaseFetcherFactory == nil { factory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ - CloudClients: c.CloudClients, - AWSConfigProvider: c.AWSConfigProvider, - IntegrationCredentialProviderFn: c.getIntegrationCredentialProviderFn(), + CloudClients: c.CloudClients, + AWSConfigProvider: c.AWSConfigProvider, }) if err != nil { return trace.Wrap(err) @@ -312,33 +315,10 @@ kubernetes matchers are present.`) } func (c *Config) getAWSConfig(ctx context.Context, region string, opts ...awsconfig.OptionsFn) (aws.Config, error) { - opts = append(opts, awsconfig.WithIntegrationCredentialProvider(c.getIntegrationCredentialProviderFn())) cfg, err := c.AWSConfigProvider.GetConfig(ctx, region, opts...) 
return cfg, trace.Wrap(err) } -func (c *Config) getIntegrationCredentialProviderFn() awsconfig.IntegrationCredentialProviderFunc { - return func(ctx context.Context, region, integrationName string) (aws.CredentialsProvider, error) { - integration, err := c.AccessPoint.GetIntegration(ctx, integrationName) - if err != nil { - return nil, trace.Wrap(err) - } - if integration.GetAWSOIDCIntegrationSpec() == nil { - return nil, trace.BadParameter("integration does not have aws oidc spec fields %q", integrationName) - } - token, err := c.AccessPoint.GenerateAWSOIDCToken(ctx, integrationName) - if err != nil { - return nil, trace.Wrap(err) - } - cred, err := awsoidc.NewAWSCredentialsProvider(ctx, &awsoidc.AWSClientRequest{ - Token: token, - RoleARN: integration.GetAWSOIDCIntegrationSpec().RoleARN, - Region: region, - }) - return cred, trace.Wrap(err) - } -} - // Server is a discovery server, used to discover cloud resources for // inclusion in Teleport type Server struct { diff --git a/lib/srv/discovery/discovery_test.go b/lib/srv/discovery/discovery_test.go index f3c387a475932..865517ba4c33c 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -37,7 +37,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redis/armredis/v3" awsv2 "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/redshift" @@ -2032,18 +2031,6 @@ func TestDiscoveryDatabase(t *testing.T) { Clusters: []*eks.Cluster{eksAWSResource}, }, } - fakeConfigProvider := &mocks.AWSConfigProvider{} - dbFetcherFactory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ - AWSConfigProvider: fakeConfigProvider, - CloudClients: testCloudClients, - IntegrationCredentialProviderFn: func(_ context.Context, _, _ string) 
(awsv2.CredentialsProvider, error) { - return credentials.NewStaticCredentialsProvider("key", "secret", "session"), nil - }, - RedshiftClientProviderFn: newFakeRedshiftClientProvider(&mocks.RedshiftClient{ - Clusters: []redshifttypes.Cluster{*awsRedshiftResource}, - }), - }) - require.NoError(t, err) tcs := []struct { name string @@ -2334,6 +2321,23 @@ func TestDiscoveryDatabase(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tlsServer.Close()) }) + awsOIDCIntegration, err := types.NewIntegrationAWSOIDC(types.Metadata{ + Name: integrationName, + }, &types.AWSOIDCIntegrationSpecV1{ + RoleARN: "arn:aws:iam::123456789012:role/teleport", + }) + require.NoError(t, err) + + testAuthServer.AuthServer.IntegrationsTokenGenerator = &mockIntegrationsTokenGenerator{ + proxies: nil, + integrations: map[string]types.Integration{ + awsOIDCIntegration.GetName(): awsOIDCIntegration, + }, + } + + _, err = tlsServer.Auth().CreateIntegration(ctx, awsOIDCIntegration) + require.NoError(t, err) + // Auth client for discovery service. 
identity := auth.TestServerID(types.RoleDiscovery, "hostID") authClient, err := tlsServer.NewClient(identity) @@ -2349,6 +2353,19 @@ func TestDiscoveryDatabase(t *testing.T) { waitForReconcile := make(chan struct{}) reporter := &mockUsageReporter{} tlsServer.Auth().SetUsageReporter(reporter) + accessPoint := getDiscoveryAccessPoint(tlsServer.Auth(), authClient) + fakeConfigProvider := &mocks.AWSConfigProvider{ + OIDCIntegrationClient: accessPoint, + } + dbFetcherFactory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ + AWSConfigProvider: fakeConfigProvider, + CloudClients: testCloudClients, + RedshiftClientProviderFn: newFakeRedshiftClientProvider(&mocks.RedshiftClient{ + Clusters: []redshifttypes.Cluster{*awsRedshiftResource}, + }), + }) + require.NoError(t, err) + srv, err := New( authz.ContextWithUser(ctx, identity.I), &Config{ @@ -2358,7 +2375,7 @@ func TestDiscoveryDatabase(t *testing.T) { AWSConfigProvider: fakeConfigProvider, ClusterFeatures: func() proto.Features { return proto.Features{} }, KubernetesClient: fake.NewSimpleClientset(), - AccessPoint: getDiscoveryAccessPoint(tlsServer.Auth(), authClient), + AccessPoint: accessPoint, Matchers: Matchers{ AWS: tc.awsMatchers, Azure: tc.azureMatchers, diff --git a/lib/srv/discovery/fetchers/db/aws.go b/lib/srv/discovery/fetchers/db/aws.go index f87e0e9a6c443..d6d70912d7092 100644 --- a/lib/srv/discovery/fetchers/db/aws.go +++ b/lib/srv/discovery/fetchers/db/aws.go @@ -55,9 +55,6 @@ type awsFetcherConfig struct { AWSClients cloud.AWSClients // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. AWSConfigProvider awsconfig.Provider - // IntegrationCredentialProviderFn is a required function that provides - // credentials via AWS OIDC integration. - IntegrationCredentialProviderFn awsconfig.IntegrationCredentialProviderFunc // Type is the type of DB matcher, for example "rds", "redshift", etc. 
Type string // AssumeRole provides a role ARN and ExternalID to assume an AWS role diff --git a/lib/srv/discovery/fetchers/db/aws_redshift.go b/lib/srv/discovery/fetchers/db/aws_redshift.go index 508cb6e8810f1..0cda0b478e67b 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift.go @@ -53,7 +53,6 @@ func (f *redshiftPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConfig awsCfg, err := cfg.AWSConfigProvider.GetConfig(ctx, cfg.Region, awsconfig.WithAssumeRole(cfg.AssumeRole.RoleARN, cfg.AssumeRole.ExternalID), awsconfig.WithCredentialsMaybeIntegration(cfg.Integration), - awsconfig.WithIntegrationCredentialProvider(cfg.IntegrationCredentialProviderFn), ) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/srv/discovery/fetchers/db/db.go b/lib/srv/discovery/fetchers/db/db.go index 3ef56532d90af..8d79bc2bb65bc 100644 --- a/lib/srv/discovery/fetchers/db/db.go +++ b/lib/srv/discovery/fetchers/db/db.go @@ -73,9 +73,6 @@ type AWSFetcherFactoryConfig struct { AWSConfigProvider awsconfig.Provider // CloudClients is an interface for retrieving AWS SDK v1 cloud clients. CloudClients cloud.AWSClients - // IntegrationCredentialProviderFn is an optional function that provides - // credentials via AWS OIDC integration. 
- IntegrationCredentialProviderFn awsconfig.IntegrationCredentialProviderFunc // RedshiftClientProviderFn is an optional function that provides RedshiftClientProviderFn RedshiftClientProviderFunc } @@ -128,16 +125,15 @@ func (f *AWSFetcherFactory) MakeFetchers(ctx context.Context, matchers []types.A for _, makeFetcher := range makeFetchers { for _, region := range matcher.Regions { fetcher, err := makeFetcher(awsFetcherConfig{ - AWSClients: f.cfg.CloudClients, - Type: matcherType, - AssumeRole: assumeRole, - Labels: matcher.Tags, - Region: region, - Integration: matcher.Integration, - DiscoveryConfigName: discoveryConfigName, - AWSConfigProvider: f.cfg.AWSConfigProvider, - IntegrationCredentialProviderFn: f.cfg.IntegrationCredentialProviderFn, - redshiftClientProviderFn: f.cfg.RedshiftClientProviderFn, + AWSClients: f.cfg.CloudClients, + Type: matcherType, + AssumeRole: assumeRole, + Labels: matcher.Tags, + Region: region, + Integration: matcher.Integration, + DiscoveryConfigName: discoveryConfigName, + AWSConfigProvider: f.cfg.AWSConfigProvider, + redshiftClientProviderFn: f.cfg.RedshiftClientProviderFn, }) if err != nil { return nil, trace.Wrap(err) From 5ed46a0ea7047c31174024664f83cdc26bcc493f Mon Sep 17 00:00:00 2001 From: Alan Parra Date: Wed, 8 Jan 2025 15:30:07 -0300 Subject: [PATCH 13/45] fix: Update excluded google.golang.org/grpc/stats/opentelemetry versions (#50881) --- go.mod | 5 ++++- integrations/event-handler/go.mod | 5 ++++- integrations/terraform/go.mod | 5 ++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e9f8023eb7323..77012f1103bc7 100644 --- a/go.mod +++ b/go.mod @@ -581,4 +581,7 @@ replace ( // least, one of the grpc-go versions above we need to exclude // stats/opentelemetry in order to avoid "ambiguous import" errors on build. // TODO(codingllama): Remove once no dependencies import stats/opentelemetry. 
-exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a +exclude ( + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 +) diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod index 873e43e03bce5..922ce6e72ae3c 100644 --- a/integrations/event-handler/go.mod +++ b/integrations/event-handler/go.mod @@ -351,4 +351,7 @@ replace ( ) // TODO(codingllama): Remove once no dependencies import stats/opentelemetry. -exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a +exclude ( + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 +) diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index 1d9aa5363d8cc..50246feeb9ed6 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -413,4 +413,7 @@ replace ( ) // TODO(codingllama): Remove once no dependencies import stats/opentelemetry. -exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a +exclude ( + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 +) From c589ae29cc25a3c249f3cc4e896549a080a463f1 Mon Sep 17 00:00:00 2001 From: Gavin Frazar Date: Wed, 8 Jan 2025 10:47:29 -0800 Subject: [PATCH 14/45] Handle retryable errors in postgres e2e tests (#50605) This wraps the test pgx.Conn in a helper struct that adds retries for retryable failures for all calls to Exec. 
--- e2e/aws/databases_test.go | 146 +++++++++++++++++++++++++++++--------- e2e/aws/fixtures_test.go | 4 -- e2e/aws/rds_test.go | 9 ++- e2e/aws/redshift_test.go | 11 ++- 4 files changed, 121 insertions(+), 49 deletions(-) diff --git a/e2e/aws/databases_test.go b/e2e/aws/databases_test.go index e7395ca8e01ce..fee21e7fe52f4 100644 --- a/e2e/aws/databases_test.go +++ b/e2e/aws/databases_test.go @@ -22,17 +22,22 @@ import ( "context" "crypto/tls" "encoding/json" + "errors" "fmt" + "log/slog" "net" "os" "strconv" + "strings" "testing" "time" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/secretsmanager" mysqlclient "github.com/go-mysql-org/go-mysql/client" + "github.com/gravitational/trace" "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -41,6 +46,7 @@ import ( apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils/keys" + "github.com/gravitational/teleport/api/utils/retryutils" "github.com/gravitational/teleport/integration/helpers" "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/cryptosuites" @@ -50,6 +56,7 @@ import ( "github.com/gravitational/teleport/lib/srv/db/common" "github.com/gravitational/teleport/lib/srv/db/postgres" "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/teleport/lib/utils" ) func TestDatabases(t *testing.T) { @@ -140,29 +147,14 @@ func postgresConnTest(t *testing.T, cluster *helpers.TeleInstance, user string, assert.NotNil(t, pgConn) }, waitForConnTimeout, connRetryTick, "connecting to postgres") - // dont wait forever on the exec or close. - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Execute a query. 
- results, err := pgConn.Exec(ctx, query).ReadAll() - require.NoError(t, err) - for i, r := range results { - require.NoError(t, r.Err, "error in result %v", i) - } - - // Disconnect. - err = pgConn.Close(ctx) - require.NoError(t, err) + execPGTestQuery(t, pgConn, query) } // postgresLocalProxyConnTest tests connection to a postgres database via // local proxy tunnel. func postgresLocalProxyConnTest(t *testing.T, cluster *helpers.TeleInstance, user string, route tlsca.RouteToDatabase, query string) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), 2*waitForConnTimeout) - defer cancel() - lp := startLocalALPNProxy(t, ctx, user, cluster, route) + lp := startLocalALPNProxy(t, user, cluster, route) pgconnConfig, err := pgconn.ParseConfig(fmt.Sprintf("postgres://%v/", lp.GetAddr())) require.NoError(t, err) @@ -180,30 +172,36 @@ func postgresLocalProxyConnTest(t *testing.T, cluster *helpers.TeleInstance, use assert.NotNil(t, pgConn) }, waitForConnTimeout, connRetryTick, "connecting to postgres") - // dont wait forever on the exec or close. - ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + execPGTestQuery(t, pgConn, query) +} + +func execPGTestQuery(t *testing.T, conn *pgconn.PgConn, query string) { + t.Helper() + defer func() { + // dont wait forever to gracefully terminate. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + // Disconnect. + require.NoError(t, conn.Close(ctx)) + }() + + // dont wait forever on the exec. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() // Execute a query. - results, err := pgConn.Exec(ctx, query).ReadAll() + results, err := conn.Exec(ctx, query).ReadAll() require.NoError(t, err) for i, r := range results { require.NoError(t, r.Err, "error in result %v", i) } - - // Disconnect. 
- err = pgConn.Close(ctx) - require.NoError(t, err) } // mysqlLocalProxyConnTest tests connection to a MySQL database via // local proxy tunnel. func mysqlLocalProxyConnTest(t *testing.T, cluster *helpers.TeleInstance, user string, route tlsca.RouteToDatabase, query string) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), 2*waitForConnTimeout) - defer cancel() - - lp := startLocalALPNProxy(t, ctx, user, cluster, route) + lp := startLocalALPNProxy(t, user, cluster, route) var conn *mysqlclient.Conn // retry for a while, the database service might need time to give @@ -223,19 +221,22 @@ func mysqlLocalProxyConnTest(t *testing.T, cluster *helpers.TeleInstance, user s assert.NoError(t, err) assert.NotNil(t, conn) }, waitForConnTimeout, connRetryTick, "connecting to mysql") + defer func() { + // Disconnect. + require.NoError(t, conn.Close()) + }() // Execute a query. require.NoError(t, conn.SetDeadline(time.Now().Add(10*time.Second))) _, err := conn.Execute(query) require.NoError(t, err) - - // Disconnect. - require.NoError(t, conn.Close()) } // startLocalALPNProxy starts local ALPN proxy for the specified database. 
-func startLocalALPNProxy(t *testing.T, ctx context.Context, user string, cluster *helpers.TeleInstance, route tlsca.RouteToDatabase) *alpnproxy.LocalProxy { +func startLocalALPNProxy(t *testing.T, user string, cluster *helpers.TeleInstance, route tlsca.RouteToDatabase) *alpnproxy.LocalProxy { t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) proto, err := alpncommon.ToALPNProtocol(route.Protocol) require.NoError(t, err) @@ -337,7 +338,7 @@ type dbUserLogin struct { port int } -func connectPostgres(t *testing.T, ctx context.Context, info dbUserLogin, dbName string) *pgx.Conn { +func connectPostgres(t *testing.T, ctx context.Context, info dbUserLogin, dbName string) *pgConn { pgCfg, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%d/?sslmode=verify-full", info.address, info.port)) require.NoError(t, err) pgCfg.User = info.username @@ -353,7 +354,10 @@ func connectPostgres(t *testing.T, ctx context.Context, info dbUserLogin, dbName t.Cleanup(func() { _ = conn.Close(ctx) }) - return conn + return &pgConn{ + logger: utils.NewSlogLoggerForTests(), + Conn: conn, + } } // secretPassword is used to unmarshal an AWS Secrets Manager @@ -395,3 +399,77 @@ func getSecretValue(t *testing.T, ctx context.Context, secretID string) secretsm require.NotNil(t, secretVal) return *secretVal } + +// pgConn wraps a [pgx.Conn] and adds retries to all Exec calls. +type pgConn struct { + logger *slog.Logger + *pgx.Conn +} + +func (c *pgConn) Exec(ctx context.Context, sql string, args ...interface{}) (pgconn.CommandTag, error) { + var out pgconn.CommandTag + err := withRetry(ctx, c.logger, func() error { + var err error + out, err = c.Conn.Exec(ctx, sql, args...) + return trace.Wrap(err) + }) + return out, trace.Wrap(err) +} + +// withRetry runs a given func a finite number of times until it returns nil +// error or the given context is done. 
+func withRetry(ctx context.Context, log *slog.Logger, f func() error) error { + linear, err := retryutils.NewLinear(retryutils.LinearConfig{ + First: 0, + Step: 500 * time.Millisecond, + Max: 5 * time.Second, + Jitter: retryutils.HalfJitter, + }) + if err != nil { + return trace.Wrap(err) + } + + // retry a finite number of times before giving up. + const retries = 10 + for i := 0; i < retries; i++ { + err := f() + if err == nil { + return nil + } + + if isRetryable(err) { + log.DebugContext(ctx, "operation failed, retrying", "error", err) + } else { + return trace.Wrap(err) + } + + linear.Inc() + select { + case <-linear.After(): + case <-ctx.Done(): + return trace.Wrap(ctx.Err()) + } + } + return trace.Wrap(err, "too many retries") +} + +// isRetryable returns true if an error can be retried. +func isRetryable(err error) bool { + var pgErr *pgconn.PgError + err = trace.Unwrap(err) + if errors.As(err, &pgErr) { + // https://www.postgresql.org/docs/current/mvcc-serialization-failure-handling.html + switch pgErr.Code { + case pgerrcode.DeadlockDetected, pgerrcode.SerializationFailure, + pgerrcode.UniqueViolation, pgerrcode.ExclusionViolation: + return true + } + } + // Redshift reports this with a vague SQLSTATE XX000, which is the internal + // error code, but this is a serialization error that rolls back the + // transaction, so it should be retried. 
+ if strings.Contains(err.Error(), "conflict with concurrent transaction") { + return true + } + return pgconn.SafeToRetry(err) +} diff --git a/e2e/aws/fixtures_test.go b/e2e/aws/fixtures_test.go index a7f682d799005..1b30f64f382a5 100644 --- a/e2e/aws/fixtures_test.go +++ b/e2e/aws/fixtures_test.go @@ -241,10 +241,6 @@ func withDiscoveryService(t *testing.T, discoveryGroup string, awsMatchers ...ty options.serviceConfigFuncs = append(options.serviceConfigFuncs, func(cfg *servicecfg.Config) { cfg.Discovery.Enabled = true cfg.Discovery.DiscoveryGroup = discoveryGroup - // Reduce the polling interval to speed up the test execution - // in the case of a failure of the first attempt. - // The default polling interval is 5 minutes. - cfg.Discovery.PollInterval = 1 * time.Minute cfg.Discovery.AWSMatchers = append(cfg.Discovery.AWSMatchers, awsMatchers...) }) } diff --git a/e2e/aws/rds_test.go b/e2e/aws/rds_test.go index bb8329cbe6c3f..d536d80a2d183 100644 --- a/e2e/aws/rds_test.go +++ b/e2e/aws/rds_test.go @@ -30,7 +30,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/rds" mysqlclient "github.com/go-mysql-org/go-mysql/client" "github.com/go-mysql-org/go-mysql/mysql" - "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -440,7 +439,7 @@ func testRDS(t *testing.T) { }) } -func connectAsRDSPostgresAdmin(t *testing.T, ctx context.Context, instanceID string) *pgx.Conn { +func connectAsRDSPostgresAdmin(t *testing.T, ctx context.Context, instanceID string) *pgConn { t.Helper() info := getRDSAdminInfo(t, ctx, instanceID) const dbName = "postgres" @@ -509,7 +508,7 @@ func getRDSAdminInfo(t *testing.T, ctx context.Context, instanceID string) dbUse // provisionRDSPostgresAutoUsersAdmin provisions an admin user suitable for auto-user // provisioning. 
-func provisionRDSPostgresAutoUsersAdmin(t *testing.T, ctx context.Context, conn *pgx.Conn, adminUser string) { +func provisionRDSPostgresAutoUsersAdmin(t *testing.T, ctx context.Context, conn *pgConn, adminUser string) { t.Helper() // Create the admin user and grant rds_iam so Teleport can auth // with IAM as an existing user. @@ -600,7 +599,7 @@ const ( autoUserWaitStep = 10 * time.Second ) -func waitForPostgresAutoUserDeactivate(t *testing.T, ctx context.Context, conn *pgx.Conn, user string) { +func waitForPostgresAutoUserDeactivate(t *testing.T, ctx context.Context, conn *pgConn, user string) { t.Helper() require.EventuallyWithT(t, func(c *assert.CollectT) { // `Query` documents that it is always safe to attempt to read from the @@ -641,7 +640,7 @@ func waitForPostgresAutoUserDeactivate(t *testing.T, ctx context.Context, conn * }, autoUserWaitDur, autoUserWaitStep, "waiting for auto user %q to be deactivated", user) } -func waitForPostgresAutoUserDrop(t *testing.T, ctx context.Context, conn *pgx.Conn, user string) { +func waitForPostgresAutoUserDrop(t *testing.T, ctx context.Context, conn *pgConn, user string) { t.Helper() require.EventuallyWithT(t, func(c *assert.CollectT) { // `Query` documents that it is always safe to attempt to read from the diff --git a/e2e/aws/redshift_test.go b/e2e/aws/redshift_test.go index 6009e3c9df7af..c8e9bbf418c20 100644 --- a/e2e/aws/redshift_test.go +++ b/e2e/aws/redshift_test.go @@ -27,7 +27,6 @@ import ( "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/redshift" - "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -96,7 +95,7 @@ func testRedshiftCluster(t *testing.T) { // eachother. 
labels := db.GetStaticLabels() labels[types.DatabaseAdminLabel] = "test_admin_" + randASCII(t, 6) - cluster.Process.GetAuthServer().UpdateDatabase(ctx, db) + err = cluster.Process.GetAuthServer().UpdateDatabase(ctx, db) require.NoError(t, err) adminUser := mustGetDBAdmin(t, db) @@ -213,7 +212,7 @@ func testRedshiftCluster(t *testing.T) { } } -func connectAsRedshiftClusterAdmin(t *testing.T, ctx context.Context, clusterID string) *pgx.Conn { +func connectAsRedshiftClusterAdmin(t *testing.T, ctx context.Context, clusterID string) *pgConn { t.Helper() info := getRedshiftAdminInfo(t, ctx, clusterID) const dbName = "dev" @@ -247,7 +246,7 @@ func getRedshiftAdminInfo(t *testing.T, ctx context.Context, clusterID string) d // provisionRedshiftAutoUsersAdmin provisions an admin user suitable for auto-user // provisioning. -func provisionRedshiftAutoUsersAdmin(t *testing.T, ctx context.Context, conn *pgx.Conn, adminUser string) { +func provisionRedshiftAutoUsersAdmin(t *testing.T, ctx context.Context, conn *pgConn, adminUser string) { t.Helper() // Don't cleanup the db admin after, because test runs would interfere // with each other. 
@@ -261,7 +260,7 @@ func provisionRedshiftAutoUsersAdmin(t *testing.T, ctx context.Context, conn *pg } } -func waitForRedshiftAutoUserDeactivate(t *testing.T, ctx context.Context, conn *pgx.Conn, user string) { +func waitForRedshiftAutoUserDeactivate(t *testing.T, ctx context.Context, conn *pgConn, user string) { t.Helper() require.EventuallyWithT(t, func(c *assert.CollectT) { // `Query` documents that it is always safe to attempt to read from the @@ -300,7 +299,7 @@ func waitForRedshiftAutoUserDeactivate(t *testing.T, ctx context.Context, conn * }, autoUserWaitDur, autoUserWaitStep, "waiting for auto user %q to be deactivated", user) } -func waitForRedshiftAutoUserDrop(t *testing.T, ctx context.Context, conn *pgx.Conn, user string) { +func waitForRedshiftAutoUserDrop(t *testing.T, ctx context.Context, conn *pgConn, user string) { t.Helper() require.EventuallyWithT(t, func(c *assert.CollectT) { // `Query` documents that it is always safe to attempt to read from the From 4a2d8916d0ca6677d60e53f963bde446e3b848e3 Mon Sep 17 00:00:00 2001 From: Paul Gottschling Date: Wed, 8 Jan 2025 14:37:21 -0500 Subject: [PATCH 15/45] Remove v13 mentions in the docs (#50831) * Remove v13 mentions in the docs Our policy is to remove mentions of the version prior to the most recent deprecated version. This change includes mentions of v13 in Helm chart values file comments. * Clarify distroless images in Helm values Responds to hugoShaka feedback. 
--- .../device-trust/device-management.mdx | 3 -- .../device-trust/enforcing-device-trust.mdx | 9 ++-- .../access-controls/device-trust/guide.mdx | 48 ++----------------- .../access-controls/guides/headless.mdx | 12 ++--- .../access-controls/guides/webauthn.mdx | 4 +- .../admin-guides/access-controls/sso/oidc.mdx | 16 +++---- .../deploy-a-cluster/helm-deployments/aws.mdx | 1 - .../operations/db-ca-migrations.mdx | 12 ++--- .../auto-user-provisioning/postgres.mdx | 2 +- .../database-access/split-db-ca-details.mdx | 5 +- docs/pages/includes/device-trust/prereqs.mdx | 2 +- .../zz_generated.teleport-kube-agent.mdx | 14 ++---- .../reference/access-controls/login-rules.mdx | 15 ------ .../pages/reference/access-controls/roles.mdx | 7 --- docs/pages/reference/predicate-language.mdx | 7 --- examples/chart/teleport-cluster/values.yaml | 16 +++---- .../chart/teleport-kube-agent/values.yaml | 14 ++---- 17 files changed, 43 insertions(+), 144 deletions(-) diff --git a/docs/pages/admin-guides/access-controls/device-trust/device-management.mdx b/docs/pages/admin-guides/access-controls/device-trust/device-management.mdx index 5ce724adf9461..52c019597f64b 100644 --- a/docs/pages/admin-guides/access-controls/device-trust/device-management.mdx +++ b/docs/pages/admin-guides/access-controls/device-trust/device-management.mdx @@ -13,9 +13,6 @@ token, and removing a trusted device. (!docs/pages/includes/device-trust/prereqs.mdx!) -- For clusters created after v13.3.6, Teleport supports the preset `device-admin` - role to manage devices. - ## Register a trusted device The `tctl` tool is used to manage the device inventory. 
A device admin is diff --git a/docs/pages/admin-guides/access-controls/device-trust/enforcing-device-trust.mdx b/docs/pages/admin-guides/access-controls/device-trust/enforcing-device-trust.mdx index 82cc5e4dff7c7..619731b02ce44 100644 --- a/docs/pages/admin-guides/access-controls/device-trust/enforcing-device-trust.mdx +++ b/docs/pages/admin-guides/access-controls/device-trust/enforcing-device-trust.mdx @@ -35,11 +35,10 @@ by the `device_trust_mode` authentication setting: (!docs/pages/includes/device-trust/prereqs.mdx!) -- We expect your Teleport cluster to be on version 13.3.6 and above, which has - the preset `require-trusted-device` role. The preset `require-trusted-device` - role does not enforce the use of a trusted device for - [Apps](#app-access-support) or [Desktops](#desktop-access-support). Refer to - their corresponding sections for instructions. +This guide makes use of the preset `require-trusted-device` role, which does not +enforce the use of a trusted device for [Apps](#app-access-support) or +[Desktops](#desktop-access-support). Refer to their corresponding sections for +instructions. ## Role-based trusted device enforcement diff --git a/docs/pages/admin-guides/access-controls/device-trust/guide.mdx b/docs/pages/admin-guides/access-controls/device-trust/guide.mdx index 62a3fe88b4db2..3eedfbc481291 100644 --- a/docs/pages/admin-guides/access-controls/device-trust/guide.mdx +++ b/docs/pages/admin-guides/access-controls/device-trust/guide.mdx @@ -45,46 +45,6 @@ protected with Teleport. root@(=clusterDefaults.nodeIP=):~# ``` -
- The preset `require-trusted-device` role, as referenced in this guide, is only available - from Teleport version 13.3.6 and above. For older Teleport cluster, you will need to update - a role with `device_trust_mode: required`. - - For simplicity, the example below updates the preset `access` role but you can update - any existing access granting role which the user is assigned with to enforce Device Trust. - - First, fetch a role so you can update it locally: - ```code - $ tctl edit role/access - ``` - - Edit the role with Device Trust mode: - ```diff - kind: role - metadata: - labels: - teleport.internal/resource-type: preset - name: access - spec: - allow: - logins: - - '{{internal.logins}}' - ... - options: - # require authenticated device check for this role - + device_trust_mode: "required" # add this line - ... - deny: - ... - - ``` - - Save your edits. - - Now that the `access` role is configured with device mode "required", users with - this role will be enforced with Device Trust. -
- Once the above prerequisites are met, begin with the following step. ## Step 1/2. Update user profile to enforce Device Trust @@ -145,12 +105,12 @@ $ tsh device enroll --current-device Device "(=devicetrust.asset_tag=)"/macOS registered and enrolled ``` - - The `--current-device` flag tells `tsh` to enroll current device. User must have the preset `editor` + + The `--current-device` flag tells `tsh` to enroll the current device. The user must have the preset `editor` or `device-admin` role to be able to self-enroll their device. For users without the `editor` or - `device-admin` roles, an enrollment token must be generated by a device admin, which can then be + `device-admin` roles, a device admin must generate an enrollment token, which can then be used to enroll the device. Learn more about manual device enrollment in the - [device management guide](./device-management.mdx#register-a-trusted-device) + [device management guide](./device-management.mdx#register-a-trusted-device). Relogin to fetch updated certificate with device extension: diff --git a/docs/pages/admin-guides/access-controls/guides/headless.mdx b/docs/pages/admin-guides/access-controls/guides/headless.mdx index 2a39c646aef7d..04cfd9a7758fc 100644 --- a/docs/pages/admin-guides/access-controls/guides/headless.mdx +++ b/docs/pages/admin-guides/access-controls/guides/headless.mdx @@ -31,7 +31,7 @@ For example: - Machines for Headless WebAuthn activities have [Linux](../../../installation.mdx), [macOS](../../../installation.mdx) or [Windows](../../../installation.mdx) `tsh` binary installed. - Machines used to approve Headless WebAuthn requests have a Web browser with [WebAuthn support]( https://developers.yubico.com/WebAuthn/WebAuthn_Browser_Support/) or `tsh` binary installed. -- Optional: Teleport Connect v13.3.1+ for [seamless Headless WebAuthn approval](#optional-teleport-connect). +- Optional: Teleport Connect for [seamless Headless WebAuthn approval](#optional-teleport-connect). ## Step 1/3. 
Configuration @@ -169,9 +169,9 @@ alice@server01 $ ## Optional: Teleport Connect -Teleport Connect v13.3.1+ can also be used to approve Headless WebAuthn logins. -Teleport Connect will automatically detect the Headless WebAuthn login attempt -and allow you to approve or cancel the request. +Teleport Connect can also be used to approve Headless WebAuthn logins. Teleport +Connect will automatically detect the Headless WebAuthn login attempt and allow +you to approve or cancel the request.
![Headless Confirmation](../../../../img/headless/confirmation.png) @@ -183,10 +183,6 @@ You will be prompted to tap your MFA key to complete the approval process. ![Headless WebAuthn Approval](../../../../img/headless/approval.png)
- - This also requires a v13.3.1+ Teleport Auth Service. - - ## Troubleshooting ### "WARN: Failed to lock system memory for headless login: ..." diff --git a/docs/pages/admin-guides/access-controls/guides/webauthn.mdx b/docs/pages/admin-guides/access-controls/guides/webauthn.mdx index f6f3bdf4a0a42..425152bc0293a 100644 --- a/docs/pages/admin-guides/access-controls/guides/webauthn.mdx +++ b/docs/pages/admin-guides/access-controls/guides/webauthn.mdx @@ -246,8 +246,8 @@ The `tctl` tool is used to manage the device inventory. A device admin is responsible for managing devices, adding new devices to the inventory and removing devices that are no longer in use. - - Users with the preset `editor` or `device-admin` role (since v13.3.6) + + Users with the preset `editor` or `device-admin` role can register and enroll their device in a single step with the following command: ```code $ tsh device enroll --current-device diff --git a/docs/pages/admin-guides/access-controls/sso/oidc.mdx b/docs/pages/admin-guides/access-controls/sso/oidc.mdx index 5efb5f4301033..adf4471d50f77 100644 --- a/docs/pages/admin-guides/access-controls/sso/oidc.mdx +++ b/docs/pages/admin-guides/access-controls/sso/oidc.mdx @@ -21,8 +21,6 @@ policies like: (!docs/pages/includes/commercial-prereqs-tabs.mdx!) - (!docs/pages/includes/tctl.mdx!) -- To control the maximum age of users' sessions before they will be forced to - reauthenticate, your Teleport cluster must be on version 13.3.7 or above. ## Identity Providers @@ -197,13 +195,13 @@ spec: ### Optional: Max age -Teleport has supported setting the `max_age` field since version 13.3.7 to control the -maximum age of users' sessions before they will be forced to reauthenticate. By -default `max_age` is unset, meaning once a user authenticates using OIDC they will -not have to reauthenticate unless the configured OIDC provider forces them to. This -can be set to a duration of time to force users to reauthenticate more often. 
If -`max_age` is set to zero seconds, users will be forced to reauthenticate with their -OIDC provider every time they authenticate with Teleport. +The `max_age` field controls the maximum age of users' sessions before they will +be forced to reauthenticate. By default `max_age` is unset, meaning once a user +authenticates using OIDC they will not have to reauthenticate unless the +configured OIDC provider forces them to. This can be set to a duration of time +to force users to reauthenticate more often. If `max_age` is set to zero +seconds, users will be forced to reauthenticate with their OIDC provider every +time they authenticate with Teleport. Note that the specified duration must be in whole seconds. `24h` works because that's the same as `1440s`, but `60s500ms` would not be allowed as that is 60.5 seconds. diff --git a/docs/pages/admin-guides/deploy-a-cluster/helm-deployments/aws.mdx b/docs/pages/admin-guides/deploy-a-cluster/helm-deployments/aws.mdx index de375be78b5df..70c41f1ef5cb5 100644 --- a/docs/pages/admin-guides/deploy-a-cluster/helm-deployments/aws.mdx +++ b/docs/pages/admin-guides/deploy-a-cluster/helm-deployments/aws.mdx @@ -109,7 +109,6 @@ You should be aware of these potential limitations and differences when using La that it terminate all inbound TLS traffic itself on the Teleport proxy. This is not directly possible when using a Layer 7 load balancer, so the `tsh` client implements this flow itself [using ALPN connection upgrades](../../../reference/architecture/tls-routing.mdx). -- The use of Teleport and `tsh` v13 or higher is required. 
Using ACM with an ALB also requires that your cluster has a fully functional installation of the AWS Load Balancer diff --git a/docs/pages/admin-guides/management/operations/db-ca-migrations.mdx b/docs/pages/admin-guides/management/operations/db-ca-migrations.mdx index 7a9cdb32b0a37..a890a38f8bd30 100644 --- a/docs/pages/admin-guides/management/operations/db-ca-migrations.mdx +++ b/docs/pages/admin-guides/management/operations/db-ca-migrations.mdx @@ -12,10 +12,8 @@ the Teleport cluster. Teleport (= db_client_ca.released_version.v15 =) introduced the `db_client` CA to split the responsibilities of the Teleport `db` CA, which was acting as both -host and client CA for Teleport self-hosted database access. -The `db_client` CA was also added as a patch in Teleport -(= db_client_ca.released_version.v13 =) and -(= db_client_ca.released_version.v14 =). +host and client CA for Teleport self-hosted database access. The `db_client` CA +was also added as a patch in Teleport (= db_client_ca.released_version.v14 =). The `db` and `db_client` CAs were both introduced as an automatic migration that occurs after upgrading Teleport. @@ -113,8 +111,7 @@ However, for defense in depth, these databases should only mTLS handshake with a client that presents a `db_client` CA-issued certificate. If your Teleport cluster was upgraded to Teleport -\>=(= db_client_ca.released_version.v13 =), -\>=(= db_client_ca.released_version.v14 =), or +\>=(= db_client_ca.released_version.v14 =) or \>=(= db_client_ca.released_version.v15 =), then you should ensure that you have completed the `db_client` migration. To complete the `db_client` CA migration: @@ -144,8 +141,7 @@ and you have not rotated *both* your `host` and `db` CAs at least once since upgrading, then you should complete the `db` CA migration. 
If you upgraded an existing cluster to Teleport -\>=(= db_client_ca.released_version.v13 =), -\>=(= db_client_ca.released_version.v14 =), or +\>=(= db_client_ca.released_version.v14 =) or \>=(= db_client_ca.released_version.v15 =) and you have not rotated *both* your `db` and `db_client` CAs at least once since upgrading, then you should complete diff --git a/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx b/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx index 8618c8a88c099..2fb8c7c1aac83 100644 --- a/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx +++ b/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx @@ -7,7 +7,7 @@ description: Configure automatic user provisioning for PostgreSQL. ## Prerequisites -- Teleport cluster v13.1 or above with a configured [self-hosted +- Teleport cluster with a configured [self-hosted PostgreSQL](../enroll-self-hosted-databases/postgres-self-hosted.mdx) or [RDS PostgreSQL](../enroll-aws-databases/rds.mdx) database. To configure permissions for database objects like tables, your cluster must be on version diff --git a/docs/pages/includes/database-access/split-db-ca-details.mdx b/docs/pages/includes/database-access/split-db-ca-details.mdx index 8dc448464989f..b50544c463ffd 100644 --- a/docs/pages/includes/database-access/split-db-ca-details.mdx +++ b/docs/pages/includes/database-access/split-db-ca-details.mdx @@ -17,9 +17,8 @@ needs to have a long-lived certificate issued by another CA that its peer node trusts. The split `db` and `db_client` CA architecture was introduced as a security fix -in Teleport versions: -(= db_client_ca.released_version.v13 =), -(= db_client_ca.released_version.v14 =), and +in Teleport versions +(= db_client_ca.released_version.v14 =) and (= db_client_ca.released_version.v15 =). 
See diff --git a/docs/pages/includes/device-trust/prereqs.mdx b/docs/pages/includes/device-trust/prereqs.mdx index 32699c20b21a7..6447d7c6dd8cf 100644 --- a/docs/pages/includes/device-trust/prereqs.mdx +++ b/docs/pages/includes/device-trust/prereqs.mdx @@ -4,7 +4,7 @@ - To enroll a Windows device, you need: - A device with TPM 2.0. - A user with administrator privileges. This is only required during enrollment. - - `tsh` v13.1.2 or newer. [Download the Windows tsh installer](../../installation.mdx#windows-tsh-and-tctl-clients-only). + - The `tsh` client. [Download the Windows tsh installer](../../installation.mdx#windows-tsh-and-tctl-clients-only). - To enroll a Linux device, you need: - A device with TPM 2.0. - A user with permissions to use the /dev/tpmrm0 device (typically done by diff --git a/docs/pages/includes/helm-reference/zz_generated.teleport-kube-agent.mdx b/docs/pages/includes/helm-reference/zz_generated.teleport-kube-agent.mdx index f7b7542c5311f..3cf958de6fbe6 100644 --- a/docs/pages/includes/helm-reference/zz_generated.teleport-kube-agent.mdx +++ b/docs/pages/includes/helm-reference/zz_generated.teleport-kube-agent.mdx @@ -1127,11 +1127,8 @@ For this reason, it is strongly discouraged to set a custom image when using automatic updates. Teleport Cloud uses automatic updates by default. -Since version 13, hardened distroless images are used by default. You can use -the deprecated debian-based images by setting the value to -`public.ecr.aws/gravitational/teleport`. Those images will be removed with -teleport 15. - +By default, the image contains only the Teleport application and its runtime +dependencies, and does not contain a shell. This setting only takes effect when [`enterprise`](#enterprise) is `false`. When running an enterprise version, you must use [`enterpriseImage`](#enterpriseImage) instead. @@ -1157,11 +1154,8 @@ Teleport-published image. using automatic updates. Teleport Cloud uses automatic updates by default. 
-Since version 13, hardened distroless images are used by default. -You can use the deprecated debian-based images by setting the value to -`public.ecr.aws/gravitational/teleport-ent`. Those images will be -removed with teleport 15. - +By default, the image contains only the Teleport application and its runtime +dependencies, and does not contain a shell. This setting only takes effect when [`enterprise`](#enterprise) is `true`. When running an enterprise version, you must use [`image`](#image) instead. diff --git a/docs/pages/reference/access-controls/login-rules.mdx b/docs/pages/reference/access-controls/login-rules.mdx index d49782f74feed..fdf24fe1efe45 100644 --- a/docs/pages/reference/access-controls/login-rules.mdx +++ b/docs/pages/reference/access-controls/login-rules.mdx @@ -584,11 +584,6 @@ Expression | Result ### `strings.split` - -The `strings.split` helper was introduced in Teleport v13.3.0. All Auth Service -instances must be running this version or greater before it can be used. - - #### Signature ```go @@ -625,11 +620,6 @@ Expression | Result ### `email.local` - -The `email.local` helper was introduced in Teleport v13.3.0. All Auth Service instances -must be running this version or greater before it can be used. - - #### Signature ```go @@ -661,11 +651,6 @@ Expression | Result ### `regexp.replace` - -The `regexp.replace` helper was introduced in Teleport v13.3.0. All Auth Service instances -must be running this version or greater before it can be used. - - #### Signature ```go diff --git a/docs/pages/reference/access-controls/roles.mdx b/docs/pages/reference/access-controls/roles.mdx index c67dd234b8642..5d04f382a28bf 100644 --- a/docs/pages/reference/access-controls/roles.mdx +++ b/docs/pages/reference/access-controls/roles.mdx @@ -189,13 +189,6 @@ spec: ### Label expressions - -Label expressions are available starting in Teleport version `13.1.1`. 
-All components of your Teleport cluster must be upgraded to version `13.1.1` -or newer before you will be able to use label expressions. -This includes the Auth Service and **all** Teleport agents. - - Teleport roles also support matching resource labels with predicate expressions when you need to: diff --git a/docs/pages/reference/predicate-language.mdx b/docs/pages/reference/predicate-language.mdx index 921436f125519..adeda2509b85d 100644 --- a/docs/pages/reference/predicate-language.mdx +++ b/docs/pages/reference/predicate-language.mdx @@ -76,13 +76,6 @@ See some [examples](cli/cli.mdx) of the different ways you can filter resources. ## Label expressions - -Label expressions are available starting in Teleport version `13.1.1`. -All components of your Teleport cluster must be upgraded to version `13.1.1` -or newer before you will be able to use label expressions. -This includes the Auth Service and **all** Teleport agents. - - Label expressions can be used in Teleport roles to define access to resources with custom logic. Check out the Access Controls diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index 7e948c15a3570..6a11b492a9879 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -568,17 +568,13 @@ tls: # Values that you shouldn't need to change. ################################################## -# Container image for the cluster. -# Since version 13, hardened distroless images are used by default. -# You can use the deprecated debian-based images by setting the value to -# `public.ecr.aws/gravitational/teleport`. Those images will be -# removed with teleport 14. +# Container image for the cluster. By default, the image contains only the +# Teleport application and its runtime dependencies, and does not contain a +# shell. 
image: public.ecr.aws/gravitational/teleport-distroless -# Enterprise version of the image -# Since version 13, hardened distroless images are used by default. -# You can use the deprecated debian-based images by setting the value to -# `public.ecr.aws/gravitational/teleport-ent`. Those images will be -# removed with teleport 14. +# Enterprise version of the image. By default, the image contains only the +# Teleport application and its runtime dependencies, and does not contain a +# shell. enterpriseImage: public.ecr.aws/gravitational/teleport-ent-distroless # Optional array of imagePullSecrets, to use when pulling from a private registry imagePullSecrets: [] diff --git a/examples/chart/teleport-kube-agent/values.yaml b/examples/chart/teleport-kube-agent/values.yaml index c51491783e11c..9b7783e022c11 100644 --- a/examples/chart/teleport-kube-agent/values.yaml +++ b/examples/chart/teleport-kube-agent/values.yaml @@ -891,11 +891,8 @@ adminClusterRoleBinding: # automatic updates. Teleport Cloud uses automatic updates by default. # # -# Since version 13, hardened distroless images are used by default. You can use -# the deprecated debian-based images by setting the value to -# `public.ecr.aws/gravitational/teleport`. Those images will be removed with -# teleport 15. -# +# By default, the image contains only the Teleport application and its runtime +# dependencies, and does not contain a shell. # This setting only takes effect when [`enterprise`](#enterprise) is `false`. # When running an enterprise version, you must use # [`enterpriseImage`](#enterpriseImage) instead. @@ -916,11 +913,8 @@ image: public.ecr.aws/gravitational/teleport-distroless # using automatic updates. Teleport Cloud uses automatic updates by default. #
# -# Since version 13, hardened distroless images are used by default. -# You can use the deprecated debian-based images by setting the value to -# `public.ecr.aws/gravitational/teleport-ent`. Those images will be -# removed with teleport 15. -# +# By default, the image contains only the Teleport application and its runtime +# dependencies, and does not contain a shell. # This setting only takes effect when [`enterprise`](#enterprise) is `true`. # When running an enterprise version, you must use [`image`](#image) instead. enterpriseImage: public.ecr.aws/gravitational/teleport-ent-distroless From 486176a9fd4fa60dc14d34fc323f70d894688413 Mon Sep 17 00:00:00 2001 From: Brian Joerger Date: Wed, 8 Jan 2025 12:12:39 -0800 Subject: [PATCH 16/45] Fix flaky test `TestIntegrations/X11Forwarding` (#50852) * Stagger outer eventually loop to ensure the inner eventually loop has time to complete, avoid racing on the display channel. * Use select case. --- integration/integration_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/integration/integration_test.go b/integration/integration_test.go index 43f2a358e51b8..0b48c90b46f39 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -4853,12 +4853,15 @@ func testX11Forwarding(t *testing.T, suite *integrationTestSuite) { assert.Eventually(t, func() bool { output, err := os.ReadFile(tmpFile.Name()) if err == nil && len(output) != 0 { - display <- strings.TrimSpace(string(output)) + select { + case display <- strings.TrimSpace(string(output)): + default: + } return true } return false }, time.Second, 100*time.Millisecond, "failed to read display") - }, 10*time.Second, time.Second) + }, 10*time.Second, 1*time.Second) // Make a new connection to the XServer proxy to confirm that forwarding is working. 
serverDisplay, err := x11.ParseDisplay(<-display) From a0b526bc43eb7c9dfd5b04b36bdf25a4b246acdc Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Wed, 8 Jan 2025 16:51:04 -0500 Subject: [PATCH 17/45] autoupdate rollout: honour the maintenance window duration (#50745) * autoupdate rollout: honour the maintenance window duration * Update lib/autoupdate/rollout/reconciler.go Co-authored-by: Bartosz Leper * Address feedback * Update lib/autoupdate/rollout/strategy.go --------- Co-authored-by: Bartosz Leper --- lib/autoupdate/rollout/reconciler.go | 21 ++--- lib/autoupdate/rollout/reconciler_test.go | 2 +- lib/autoupdate/rollout/strategy.go | 19 +++-- .../rollout/strategy_haltonerror.go | 9 ++- .../rollout/strategy_haltonerror_test.go | 2 +- lib/autoupdate/rollout/strategy_test.go | 76 ++++++++++++++----- lib/autoupdate/rollout/strategy_timebased.go | 14 +++- .../rollout/strategy_timebased_test.go | 8 +- 8 files changed, 106 insertions(+), 45 deletions(-) diff --git a/lib/autoupdate/rollout/reconciler.go b/lib/autoupdate/rollout/reconciler.go index 02f393a58ff8d..2a3282fa4670b 100644 --- a/lib/autoupdate/rollout/reconciler.go +++ b/lib/autoupdate/rollout/reconciler.go @@ -202,11 +202,12 @@ func (r *reconciler) buildRolloutSpec(config *autoupdate.AutoUpdateConfigSpecAge } return &autoupdate.AutoUpdateAgentRolloutSpec{ - StartVersion: version.GetStartVersion(), - TargetVersion: version.GetTargetVersion(), - Schedule: version.GetSchedule(), - AutoupdateMode: mode, - Strategy: strategy, + StartVersion: version.GetStartVersion(), + TargetVersion: version.GetTargetVersion(), + Schedule: version.GetSchedule(), + AutoupdateMode: mode, + Strategy: strategy, + MaintenanceWindowDuration: config.GetMaintenanceWindowDuration(), }, nil } @@ -318,7 +319,7 @@ func (r *reconciler) computeStatus( } status.Groups = groups - err = r.progressRollout(ctx, newSpec.GetStrategy(), status, now) + err = r.progressRollout(ctx, newSpec, status, now) // Failing to progress the update is not a hard 
failure. // We want to update the status even if something went wrong to surface the failed reconciliation and potential errors to the user. if err != nil { @@ -334,13 +335,13 @@ func (r *reconciler) computeStatus( // groups are updated in place. // If an error is returned, the groups should still be upserted, depending on the strategy, // failing to update a group might not be fatal (other groups can still progress independently). -func (r *reconciler) progressRollout(ctx context.Context, strategyName string, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { +func (r *reconciler) progressRollout(ctx context.Context, spec *autoupdate.AutoUpdateAgentRolloutSpec, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { for _, strategy := range r.rolloutStrategies { - if strategy.name() == strategyName { - return strategy.progressRollout(ctx, status, now) + if strategy.name() == spec.GetStrategy() { + return strategy.progressRollout(ctx, spec, status, now) } } - return trace.NotImplemented("rollout strategy %q not implemented", strategyName) + return trace.NotImplemented("rollout strategy %q not implemented", spec.GetStrategy()) } // makeGroupStatus creates the autoupdate_agent_rollout.status.groups based on the autoupdate_config. 
diff --git a/lib/autoupdate/rollout/reconciler_test.go b/lib/autoupdate/rollout/reconciler_test.go index c2739685b7f72..af14136a3d156 100644 --- a/lib/autoupdate/rollout/reconciler_test.go +++ b/lib/autoupdate/rollout/reconciler_test.go @@ -714,7 +714,7 @@ func (f *fakeRolloutStrategy) name() string { return f.strategyName } -func (f *fakeRolloutStrategy) progressRollout(ctx context.Context, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { +func (f *fakeRolloutStrategy) progressRollout(ctx context.Context, spec *autoupdate.AutoUpdateAgentRolloutSpec, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { f.calls++ return nil } diff --git a/lib/autoupdate/rollout/strategy.go b/lib/autoupdate/rollout/strategy.go index d2f8c2da81f93..d5b8236ce8f90 100644 --- a/lib/autoupdate/rollout/strategy.go +++ b/lib/autoupdate/rollout/strategy.go @@ -40,10 +40,14 @@ const ( // This interface allows us to inject dummy strategies for simpler testing. type rolloutStrategy interface { name() string - progressRollout(context.Context, *autoupdate.AutoUpdateAgentRolloutStatus, time.Time) error + // progressRollout takes the new rollout spec, existing rollout status and current time. + // It updates the status resource in-place to progress the rollout to the next step if possible/needed. + progressRollout(context.Context, *autoupdate.AutoUpdateAgentRolloutSpec, *autoupdate.AutoUpdateAgentRolloutStatus, time.Time) error } -func inWindow(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now time.Time) (bool, error) { +// inWindow checks if the time is in the group's maintenance window. +// The maintenance window is the semi-open interval: [windowStart, windowEnd). 
+func inWindow(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now time.Time, duration time.Duration) (bool, error) { dayOK, err := canUpdateToday(group.ConfigDays, now) if err != nil { return false, trace.Wrap(err, "checking the day of the week") @@ -51,17 +55,22 @@ func inWindow(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now time.Time if !dayOK { return false, nil } - return int(group.ConfigStartHour) == now.Hour(), nil + + // We compute the theoretical window start and end + windowStart := now.Truncate(24 * time.Hour).Add(time.Duration(group.ConfigStartHour) * time.Hour) + windowEnd := windowStart.Add(duration) + + return !now.Before(windowStart) && now.Before(windowEnd), nil } // rolloutChangedInWindow checks if the rollout got created after the theoretical group start time -func rolloutChangedInWindow(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now, rolloutStart time.Time) (bool, error) { +func rolloutChangedInWindow(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now, rolloutStart time.Time, duration time.Duration) (bool, error) { // If the rollout is older than 24h, we know it did not change during the window if now.Sub(rolloutStart) > 24*time.Hour { return false, nil } // Else we check if the rollout happened in the group window. 
- return inWindow(group, rolloutStart) + return inWindow(group, rolloutStart, duration) } func canUpdateToday(allowedDays []string, now time.Time) (bool, error) { diff --git a/lib/autoupdate/rollout/strategy_haltonerror.go b/lib/autoupdate/rollout/strategy_haltonerror.go index 6ed1a4aae049d..fafc5d5ae30d3 100644 --- a/lib/autoupdate/rollout/strategy_haltonerror.go +++ b/lib/autoupdate/rollout/strategy_haltonerror.go @@ -35,6 +35,7 @@ const ( updateReasonPreviousGroupsNotDone = "previous_groups_not_done" updateReasonUpdateComplete = "update_complete" updateReasonUpdateInProgress = "update_in_progress" + haltOnErrorWindowDuration = time.Hour ) type haltOnErrorStrategy struct { @@ -54,7 +55,7 @@ func newHaltOnErrorStrategy(log *slog.Logger) (rolloutStrategy, error) { }, nil } -func (h *haltOnErrorStrategy) progressRollout(ctx context.Context, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { +func (h *haltOnErrorStrategy) progressRollout(ctx context.Context, _ *autoupdate.AutoUpdateAgentRolloutSpec, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { // We process every group in order, all the previous groups must be in the DONE state // for the next group to become active. 
Even if some early groups are not DONE, // later groups might be ACTIVE and need to transition to DONE, so we cannot @@ -81,7 +82,7 @@ func (h *haltOnErrorStrategy) progressRollout(ctx context.Context, status *autou } // Check if the rollout got created after the theoretical group start time - rolloutChangedDuringWindow, err := rolloutChangedInWindow(group, now, status.StartTime.AsTime()) + rolloutChangedDuringWindow, err := rolloutChangedInWindow(group, now, status.StartTime.AsTime(), haltOnErrorWindowDuration) if err != nil { setGroupState(group, group.State, updateReasonReconcilerError, now) return err @@ -149,14 +150,14 @@ func canStartHaltOnError(group, previousGroup *autoupdate.AutoUpdateAgentRollout } } - return inWindow(group, now) + return inWindow(group, now, haltOnErrorWindowDuration) } func isDoneHaltOnError(group *autoupdate.AutoUpdateAgentRolloutStatusGroup, now time.Time) (bool, string) { // Currently we don't implement status reporting from groups/agents. // So we just wait 60 minutes and consider the maintenance done. // This will change as we introduce agent status report and aggregated agent counts. - if group.StartTime.AsTime().Add(time.Hour).Before(now) { + if group.StartTime.AsTime().Add(haltOnErrorWindowDuration).Before(now) { return true, updateReasonUpdateComplete } return false, updateReasonUpdateInProgress diff --git a/lib/autoupdate/rollout/strategy_haltonerror_test.go b/lib/autoupdate/rollout/strategy_haltonerror_test.go index ee3eb8e80ffca..2f59534ddd7db 100644 --- a/lib/autoupdate/rollout/strategy_haltonerror_test.go +++ b/lib/autoupdate/rollout/strategy_haltonerror_test.go @@ -500,7 +500,7 @@ func Test_progressGroupsHaltOnError(t *testing.T) { State: 0, StartTime: tt.rolloutStartTime, } - err := strategy.progressRollout(ctx, status, clock.Now()) + err := strategy.progressRollout(ctx, nil, status, clock.Now()) require.NoError(t, err) // We use require.Equal instead of Elements match because group order matters. 
// It's not super important for time-based, but is crucial for halt-on-error. diff --git a/lib/autoupdate/rollout/strategy_test.go b/lib/autoupdate/rollout/strategy_test.go index 1348716ba6c1d..0711d4043ae9c 100644 --- a/lib/autoupdate/rollout/strategy_test.go +++ b/lib/autoupdate/rollout/strategy_test.go @@ -95,11 +95,12 @@ func Test_canUpdateToday(t *testing.T) { func Test_inWindow(t *testing.T) { tests := []struct { - name string - group *autoupdate.AutoUpdateAgentRolloutStatusGroup - now time.Time - want bool - wantErr require.ErrorAssertionFunc + name string + group *autoupdate.AutoUpdateAgentRolloutStatusGroup + now time.Time + duration time.Duration + want bool + wantErr require.ErrorAssertionFunc }{ { name: "out of window", @@ -107,9 +108,10 @@ func Test_inWindow(t *testing.T) { ConfigDays: everyWeekdayButSunday, ConfigStartHour: matchingStartHour, }, - now: testSunday, - want: false, - wantErr: require.NoError, + now: testSunday, + duration: time.Hour, + want: false, + wantErr: require.NoError, }, { name: "inside window, wrong hour", @@ -117,9 +119,10 @@ func Test_inWindow(t *testing.T) { ConfigDays: everyWeekday, ConfigStartHour: nonMatchingStartHour, }, - now: testSunday, - want: false, - wantErr: require.NoError, + now: testSunday, + duration: time.Hour, + want: false, + wantErr: require.NoError, }, { name: "inside window, correct hour", @@ -127,9 +130,10 @@ func Test_inWindow(t *testing.T) { ConfigDays: everyWeekday, ConfigStartHour: matchingStartHour, }, - now: testSunday, - want: true, - wantErr: require.NoError, + now: testSunday, + duration: time.Hour, + want: true, + wantErr: require.NoError, }, { name: "invalid weekdays", @@ -137,14 +141,48 @@ func Test_inWindow(t *testing.T) { ConfigDays: []string{"HelloThereGeneralKenobi"}, ConfigStartHour: matchingStartHour, }, - now: testSunday, - want: false, - wantErr: require.Error, + now: testSunday, + duration: time.Hour, + want: false, + wantErr: require.Error, + }, + { + name: "short window", + group: 
&autoupdate.AutoUpdateAgentRolloutStatusGroup{ + ConfigDays: everyWeekday, + ConfigStartHour: matchingStartHour, + }, + now: testSunday, + duration: time.Second, + want: false, + wantErr: require.NoError, + }, + { + name: "window start time is included", + group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{ + ConfigDays: everyWeekday, + ConfigStartHour: matchingStartHour, + }, + now: testSunday.Truncate(24 * time.Hour).Add(time.Duration(matchingStartHour) * time.Hour), + duration: time.Hour, + want: true, + wantErr: require.NoError, + }, + { + name: "window end time is not included", + group: &autoupdate.AutoUpdateAgentRolloutStatusGroup{ + ConfigDays: everyWeekday, + ConfigStartHour: matchingStartHour, + }, + now: testSunday.Truncate(24 * time.Hour).Add(time.Duration(matchingStartHour+1) * time.Hour), + duration: time.Hour, + want: false, + wantErr: require.NoError, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := inWindow(tt.group, tt.now) + got, err := inWindow(tt.group, tt.now, tt.duration) tt.wantErr(t, err) require.Equal(t, tt.want, got) }) @@ -205,7 +243,7 @@ func Test_rolloutChangedInWindow(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Test execution. 
- result, err := rolloutChangedInWindow(group, tt.now, tt.rolloutStart) + result, err := rolloutChangedInWindow(group, tt.now, tt.rolloutStart, time.Hour) require.NoError(t, err) require.Equal(t, tt.want, result) }) diff --git a/lib/autoupdate/rollout/strategy_timebased.go b/lib/autoupdate/rollout/strategy_timebased.go index 5d06adf0e5ace..e4df5c6e23789 100644 --- a/lib/autoupdate/rollout/strategy_timebased.go +++ b/lib/autoupdate/rollout/strategy_timebased.go @@ -51,7 +51,13 @@ func newTimeBasedStrategy(log *slog.Logger) (rolloutStrategy, error) { }, nil } -func (h *timeBasedStrategy) progressRollout(ctx context.Context, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { +func (h *timeBasedStrategy) progressRollout(ctx context.Context, spec *autoupdate.AutoUpdateAgentRolloutSpec, status *autoupdate.AutoUpdateAgentRolloutStatus, now time.Time) error { + windowDuration := spec.GetMaintenanceWindowDuration().AsDuration() + // Backward compatibility for resources previously created without duration. + if windowDuration == 0 { + windowDuration = haltOnErrorWindowDuration + } + // We always process every group regardless of the order. var errs []error for _, group := range status.Groups { @@ -61,7 +67,7 @@ func (h *timeBasedStrategy) progressRollout(ctx context.Context, status *autoupd // We start any group unstarted group in window. // Done groups can transition back to active if they enter their maintenance window again. // Some agents might have missed the previous windows and might expected to try again. - shouldBeActive, err := inWindow(group, now) + shouldBeActive, err := inWindow(group, now, windowDuration) if err != nil { // In time-based rollouts, groups are not dependent. // Failing to transition a group should affect other groups. 
@@ -72,7 +78,7 @@ func (h *timeBasedStrategy) progressRollout(ctx context.Context, status *autoupd } // Check if the rollout got created after the theoretical group start time - rolloutChangedDuringWindow, err := rolloutChangedInWindow(group, now, status.StartTime.AsTime()) + rolloutChangedDuringWindow, err := rolloutChangedInWindow(group, now, status.StartTime.AsTime(), windowDuration) if err != nil { setGroupState(group, group.State, updateReasonReconcilerError, now) errs = append(errs, err) @@ -93,7 +99,7 @@ func (h *timeBasedStrategy) progressRollout(ctx context.Context, status *autoupd case autoupdate.AutoUpdateAgentGroupState_AUTO_UPDATE_AGENT_GROUP_STATE_ACTIVE: // The group is currently being updated. We check if the maintenance // is over and if we should transition it to the done state - shouldBeActive, err := inWindow(group, now) + shouldBeActive, err := inWindow(group, now, windowDuration) if err != nil { // In time-based rollouts, groups are not dependent. // Failing to transition a group should affect other groups. 
diff --git a/lib/autoupdate/rollout/strategy_timebased_test.go b/lib/autoupdate/rollout/strategy_timebased_test.go index 84367f9927c04..6fa6245598a15 100644 --- a/lib/autoupdate/rollout/strategy_timebased_test.go +++ b/lib/autoupdate/rollout/strategy_timebased_test.go @@ -25,6 +25,7 @@ import ( "github.com/jonboulle/clockwork" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1" @@ -325,6 +326,11 @@ func Test_progressGroupsTimeBased(t *testing.T) { }, }, } + + spec := &autoupdate.AutoUpdateAgentRolloutSpec{ + MaintenanceWindowDuration: durationpb.New(time.Hour), + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { status := &autoupdate.AutoUpdateAgentRolloutStatus{ @@ -332,7 +338,7 @@ func Test_progressGroupsTimeBased(t *testing.T) { State: 0, StartTime: tt.rolloutStartTime, } - err := strategy.progressRollout(ctx, status, clock.Now()) + err := strategy.progressRollout(ctx, spec, status, clock.Now()) require.NoError(t, err) // We use require.Equal instead of Elements match because group order matters. // It's not super important for time-based, but is crucial for halt-on-error. From 1a01eb4efee5032cf77667f1b54d9fd6a46bcc73 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Wed, 8 Jan 2025 17:45:16 -0500 Subject: [PATCH 18/45] Fix flaky `TestInitDatabaseService/enabled_invalid_databases` (#50702) --- lib/service/service_test.go | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/lib/service/service_test.go b/lib/service/service_test.go index 16309ed59ac72..38ee9918008c4 100644 --- a/lib/service/service_test.go +++ b/lib/service/service_test.go @@ -1829,15 +1829,22 @@ func TestInitDatabaseService(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() + // Arbitrary channel size to avoid blocking. 
+	// We should not receive more than 1024 events as we have fewer than 1024 services.
+	serviceExitedEvents := make(chan Event, 1024)
+
 			var eg errgroup.Group
 			process, err := NewTeleport(cfg)
 			require.NoError(t, err)
+			process.ListenForEvents(ctx, ServiceExitedWithErrorEvent, serviceExitedEvents)
 			require.NoError(t, process.Start())
 			eg.Go(func() error { return process.WaitForSignals(ctx, nil) })
 			// Ensures the process is closed in failure scenarios.
 			t.Cleanup(func() {
 				cancel()
 				_ = eg.Wait()
+				_ = process.Close()
+				require.NoError(t, process.Wait())
 			})
 
 			if !test.expectErr {
@@ -1846,15 +1853,24 @@
 				require.NoError(t, process.Close())
 				// Expect Teleport to shutdown without reporting any issue.
 				require.NoError(t, eg.Wait())
+				require.NoError(t, process.Wait())
 				return
 			}
 
-			event, err := process.WaitForEvent(ctx, ServiceExitedWithErrorEvent)
-			require.NoError(t, err)
-			require.NotNil(t, event)
-			exitPayload, ok := event.Payload.(ExitEventPayload)
-			require.True(t, ok, "expected ExitEventPayload but got %T", event.Payload)
-			require.Equal(t, "db.init", exitPayload.Service.Name())
+			// The first service to exit should be the db one, with a "db.init" event.
+			// We can't use WaitForEvent because it only returns the last event for this type.
+			// As the test causes Teleport to crash, other services might exit in error before
+			// we get the event, causing the test to fail.
+			select {
+			case event := <-serviceExitedEvents:
+				require.NotNil(t, event)
+				exitPayload, ok := event.Payload.(ExitEventPayload)
+				require.True(t, ok, "expected ExitEventPayload but got %T", event.Payload)
+				require.Equal(t, "db.init", exitPayload.Service.Name(), "expected db init failure, got instead %q with error %q", exitPayload.Service.Name(), exitPayload.Error)
+			case <-ctx.Done():
+				require.Fail(t, "context timed out, we never received the failed db.init event")
+			}
+
 			// Database service init is a critical service, meaning failures on
 			// it should cause the process to exit with error.
 			require.Error(t, eg.Wait())

From 059ff041aa9433b0f8f051cbb8eefb499c870c43 Mon Sep 17 00:00:00 2001
From: Trent Clarke
Date: Thu, 9 Jan 2025 10:00:04 +1100
Subject: [PATCH 19/45] Allow Reconciler to change resource origin (#50851)

By default, the resource reconciler disallows changing a resource origin
in order to enforce the segregation of resources created from different
sources.

This patch introduces an option to allow the reconciler to change a
resource's origin, bypassing the origin change check if enabled.

This is part of addressing #50654
---
 lib/services/reconciler.go      | 32 ++++++++++++++++++++------------
 lib/services/reconciler_test.go | 30 +++++++++++++++++++++++++++---
 2 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/lib/services/reconciler.go b/lib/services/reconciler.go
index 17b136a056152..9aa23cac0007f 100644
--- a/lib/services/reconciler.go
+++ b/lib/services/reconciler.go
@@ -55,6 +55,11 @@ type GenericReconcilerConfig[K comparable, T any] struct {
 	OnDelete func(context.Context, T) error
 	// Logger emits log messages.
 	Logger *slog.Logger
+	// AllowOriginChanges is a flag that allows the reconciler to change the
+	// origin value of a reconciled resource. By default, origin changes are
+	// disallowed to enforce segregation of resources from different
+	// sources.
+ AllowOriginChanges bool } // CheckAndSetDefaults validates the reconciler configuration and sets defaults. @@ -177,18 +182,21 @@ func (r *GenericReconciler[K, T]) processNewResource(ctx context.Context, curren return nil } - // Don't overwrite resource of a different origin (e.g., keep static resource from config and ignore dynamic resource) - registeredOrigin, err := types.GetOrigin(registered) - if err != nil { - return trace.Wrap(err) - } - newOrigin, err := types.GetOrigin(newT) - if err != nil { - return trace.Wrap(err) - } - if registeredOrigin != newOrigin { - r.logger.WarnContext(ctx, "New resource has different origin, not updating", "name", key, "new_origin", newOrigin, "existing_origin", registeredOrigin) - return nil + if !r.cfg.AllowOriginChanges { + // Don't overwrite resource of a different origin (e.g., keep static resource from config and ignore dynamic resource) + registeredOrigin, err := types.GetOrigin(registered) + if err != nil { + return trace.Wrap(err) + } + newOrigin, err := types.GetOrigin(newT) + if err != nil { + return trace.Wrap(err) + } + if registeredOrigin != newOrigin { + r.logger.WarnContext(ctx, "New resource has different origin, not updating", + "name", key, "new_origin", newOrigin, "existing_origin", registeredOrigin) + return nil + } } // If the resource is already registered but was updated, see if its diff --git a/lib/services/reconciler_test.go b/lib/services/reconciler_test.go index 37ca13e1447a0..ad97fb61c904d 100644 --- a/lib/services/reconciler_test.go +++ b/lib/services/reconciler_test.go @@ -43,6 +43,7 @@ func TestReconciler(t *testing.T) { onCreateCalls []testResource onUpdateCalls []updateCall onDeleteCalls []testResource + configure func(cfg *ReconcilerConfig[testResource]) comparator func(testResource, testResource) int }{ { @@ -73,13 +74,30 @@ func TestReconciler(t *testing.T) { }, }, { - description: "resources with different origins don't overwrite each other", + description: "resources with different 
origins don't overwrite each other by default", selectors: []ResourceMatcher{{ Labels: types.Labels{"*": []string{"*"}}, }}, registeredResources: []testResource{makeStaticResource("res1", nil)}, newResources: []testResource{makeDynamicResource("res1", nil)}, }, + { + description: "resources with different origins overwrite each other when allowed", + selectors: []ResourceMatcher{{ + Labels: types.Labels{"*": []string{"*"}}, + }}, + configure: func(cfg *ReconcilerConfig[testResource]) { + cfg.AllowOriginChanges = true + }, + registeredResources: []testResource{makeStaticResource("res1", nil)}, + newResources: []testResource{makeDynamicResource("res1", nil)}, + onUpdateCalls: []updateCall{ + { + old: makeStaticResource("res1", nil), + new: makeDynamicResource("res1", nil), + }, + }, + }, { description: "resource that's no longer present should be removed", selectors: []ResourceMatcher{{ @@ -198,7 +216,7 @@ func TestReconciler(t *testing.T) { var onCreateCalls, onDeleteCalls []testResource var onUpdateCalls []updateCall - reconciler, err := NewReconciler[testResource](ReconcilerConfig[testResource]{ + cfg := ReconcilerConfig[testResource]{ Matcher: func(tr testResource) bool { return MatchResourceLabels(test.selectors, tr.GetMetadata().Labels) }, @@ -225,7 +243,13 @@ func TestReconciler(t *testing.T) { onDeleteCalls = append(onDeleteCalls, tr) return nil }, - }) + } + + if test.configure != nil { + test.configure(&cfg) + } + + reconciler, err := NewReconciler[testResource](cfg) require.NoError(t, err) // Reconcile and make sure we got all expected callback calls. 
From 3fc749aea7b03a0df42a8bbc96f5fb0d5f32abe6 Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Thu, 9 Jan 2025 04:39:47 -0500 Subject: [PATCH 20/45] docs: update doc verbiage for usage of 'its' (#50897) * docs: update azure and openssh instrs * docs: update usage of its --- .../access-controls/idps/saml-attribute-mapping.mdx | 4 ++-- docs/pages/admin-guides/access-controls/idps/saml-guide.mdx | 2 +- docs/pages/enroll-resources/machine-id/deployment/azure.mdx | 2 +- .../server-access/openssh/openssh-manual-install.mdx | 2 +- docs/pages/reference/architecture/trustedclusters.mdx | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/pages/admin-guides/access-controls/idps/saml-attribute-mapping.mdx b/docs/pages/admin-guides/access-controls/idps/saml-attribute-mapping.mdx index 94e15948a88e9..25a485b253975 100644 --- a/docs/pages/admin-guides/access-controls/idps/saml-attribute-mapping.mdx +++ b/docs/pages/admin-guides/access-controls/idps/saml-attribute-mapping.mdx @@ -73,7 +73,7 @@ Attribute mapping that points to a non-existent value will not be included in SA Predicate expressions for attribute mapping are evaluated against user attributes that can be accessed using evaluation context listed above. -The supported functions and methods are listed below, along with the usage syntax and it's result, evaluated +The supported functions and methods are listed below, along with the usage syntax and its result, evaluated against the following reference user spec file: ```yaml # reference user spec file @@ -200,4 +200,4 @@ $ tctl idp saml test-attribute-mapping --user user.yml --sp sp.yml Print result in format of choice. 
```code $ tctl idp saml test-attribute-mapping --user user.yml --sp sp.yml --format (json/yaml) -``` \ No newline at end of file +``` diff --git a/docs/pages/admin-guides/access-controls/idps/saml-guide.mdx b/docs/pages/admin-guides/access-controls/idps/saml-guide.mdx index 79a748fc2a60b..5d1c924c0912c 100644 --- a/docs/pages/admin-guides/access-controls/idps/saml-guide.mdx +++ b/docs/pages/admin-guides/access-controls/idps/saml-guide.mdx @@ -141,7 +141,7 @@ $ tctl create iamshowcase.yaml -If an `entity_descriptor` is provided, it's content takes preference over values provided in `entity_id` and `acs_url`. +If an `entity_descriptor` is provided, its content takes preference over values provided in `entity_id` and `acs_url`. Teleport only tries to fetch or generate entity descriptor when service provider is created for the first time. Subsequent updates require an entity descriptor to be present in the service provider spec. As such, when updating diff --git a/docs/pages/enroll-resources/machine-id/deployment/azure.mdx b/docs/pages/enroll-resources/machine-id/deployment/azure.mdx index c78005fbb7598..1639f81cefc4a 100644 --- a/docs/pages/enroll-resources/machine-id/deployment/azure.mdx +++ b/docs/pages/enroll-resources/machine-id/deployment/azure.mdx @@ -12,7 +12,7 @@ On the Azure platform, virtual machines can be assigned a managed identity. The Azure platform will then make available to the virtual machine an attested data document and JWT that allows the virtual machine to act as this identity. This identity can be validated by a third party by attempting to use this token -to fetch it's own identity from the Azure identity service. +to fetch its own identity from the Azure identity service. The `azure` join method instructs the bot to use this attested data document and JWT to prove its identity to the Teleport Auth Server. 
This allows joining to diff --git a/docs/pages/enroll-resources/server-access/openssh/openssh-manual-install.mdx b/docs/pages/enroll-resources/server-access/openssh/openssh-manual-install.mdx index 31885ef8ba816..0f91c395dcd32 100644 --- a/docs/pages/enroll-resources/server-access/openssh/openssh-manual-install.mdx +++ b/docs/pages/enroll-resources/server-access/openssh/openssh-manual-install.mdx @@ -224,7 +224,7 @@ $ tctl get node/openssh-node When creating host certificates, it is important to specify all the domain names and addresses that refer to your node. If you try to connect to a node with a -name or address that was not specified when creating it's host certificate, +name or address that was not specified when creating its host certificate, Teleport will reject the SSH connection. On your local machine, assign the IP address, fully qualified domain name of diff --git a/docs/pages/reference/architecture/trustedclusters.mdx b/docs/pages/reference/architecture/trustedclusters.mdx index 35bf8256cd30c..ee6cd43b4dbe2 100644 --- a/docs/pages/reference/architecture/trustedclusters.mdx +++ b/docs/pages/reference/architecture/trustedclusters.mdx @@ -29,7 +29,7 @@ databases behind a firewall. In the example below, there are three independent clusters: - Cluster `sso.example.com` is a root cluster. This cluster can be used as a single-sign-on entry point -for your organization. It can have it's own independent resources connected to it, or be used just for audit +for your organization. It can have its own independent resources connected to it, or be used just for audit logs collection and single-sign-on. - Clusters `us-east-1a` and `us-east-1b` are two independent clusters in different availability zones. 
From 4b2a0da91dd50bc64612147eb0fa77178b5a3ce0 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Thu, 9 Jan 2025 09:45:41 +0000 Subject: [PATCH 21/45] Fix AWS ListDeployedDatabaseServices when there's no ECS Cluster (#50843) Calling the AWS API `ecs:ListServices` with a non-existent ECS Cluster name will return a 400 w/ ClusterNotFoundException. The existing code was not handling that error and a raw error was returned. This PR changes the logic to ensure that case is handled and that the ListDeployedDatabaseServices returns an empty list. An alternative would be to call the ListClusters beforehand, but that would increase the number of API calls we do to external services. --- lib/cloud/aws/errors.go | 9 +++++++ lib/cloud/aws/errors_test.go | 12 +++++++++ .../awsoidc/listdeployeddatabaseservice.go | 6 +++++ .../listdeployeddatabaseservice_test.go | 25 ++++++++++++++++--- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/lib/cloud/aws/errors.go b/lib/cloud/aws/errors.go index 576e7f4350ce2..63a9ffa75ca95 100644 --- a/lib/cloud/aws/errors.go +++ b/lib/cloud/aws/errors.go @@ -24,6 +24,7 @@ import ( "strings" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + ecstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" @@ -55,6 +56,10 @@ func ConvertRequestFailureErrorV2(err error) error { return err } +var ( + ecsClusterNotFoundException *ecstypes.ClusterNotFoundException +) + func convertRequestFailureErrorFromStatusCode(statusCode int, requestErr error) error { switch statusCode { case http.StatusForbidden: @@ -69,6 +74,10 @@ func convertRequestFailureErrorFromStatusCode(statusCode int, requestErr error) if strings.Contains(requestErr.Error(), redshiftserverless.ErrCodeAccessDeniedException) { return trace.AccessDenied(requestErr.Error()) } + + if strings.Contains(requestErr.Error(), 
ecsClusterNotFoundException.ErrorCode()) { + return trace.NotFound(requestErr.Error()) + } } return requestErr // Return unmodified. diff --git a/lib/cloud/aws/errors_test.go b/lib/cloud/aws/errors_test.go index 165456bfdb25b..7f0c3c26b0307 100644 --- a/lib/cloud/aws/errors_test.go +++ b/lib/cloud/aws/errors_test.go @@ -85,6 +85,18 @@ func TestConvertRequestFailureError(t *testing.T) { }, wantIsError: trace.IsNotFound, }, + { + name: "v2 sdk error for ecs ClusterNotFoundException", + inputError: &awshttp.ResponseError{ + ResponseError: &smithyhttp.ResponseError{ + Response: &smithyhttp.Response{Response: &http.Response{ + StatusCode: http.StatusBadRequest, + }}, + Err: trace.Errorf("ClusterNotFoundException"), + }, + }, + wantIsError: trace.IsNotFound, + }, } for _, test := range tests { diff --git a/lib/integrations/awsoidc/listdeployeddatabaseservice.go b/lib/integrations/awsoidc/listdeployeddatabaseservice.go index c2894902f78fe..ad5bb9606faf4 100644 --- a/lib/integrations/awsoidc/listdeployeddatabaseservice.go +++ b/lib/integrations/awsoidc/listdeployeddatabaseservice.go @@ -27,6 +27,7 @@ import ( ecstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/gravitational/trace" + awslib "github.com/gravitational/teleport/lib/cloud/aws" "github.com/gravitational/teleport/lib/integrations/awsoidc/tags" ) @@ -139,6 +140,11 @@ func ListDeployedDatabaseServices(ctx context.Context, clt ListDeployedDatabaseS listServicesOutput, err := clt.ListServices(ctx, listServicesInput) if err != nil { + convertedError := awslib.ConvertRequestFailureErrorV2(err) + if trace.IsNotFound(convertedError) { + return &ListDeployedDatabaseServicesResponse{}, nil + } + return nil, trace.Wrap(err) } diff --git a/lib/integrations/awsoidc/listdeployeddatabaseservice_test.go b/lib/integrations/awsoidc/listdeployeddatabaseservice_test.go index 67f332d495c2b..84b163d519465 100644 --- a/lib/integrations/awsoidc/listdeployeddatabaseservice_test.go +++ 
b/lib/integrations/awsoidc/listdeployeddatabaseservice_test.go @@ -110,11 +110,11 @@ type mockListECSClient struct { } func (m *mockListECSClient) ListServices(ctx context.Context, params *ecs.ListServicesInput, optFns ...func(*ecs.Options)) (*ecs.ListServicesOutput, error) { - ret := &ecs.ListServicesOutput{} - if aws.ToString(params.Cluster) != m.clusterName { - return ret, nil + if aws.ToString(params.Cluster) != m.clusterName || len(m.services) == 0 { + return nil, trace.NotFound("ECS Cluster not found") } + ret := &ecs.ListServicesOutput{} requestedPage := 1 totalEndpoints := len(m.services) @@ -348,6 +348,25 @@ func TestListDeployedDatabaseServices(t *testing.T) { }, errCheck: require.NoError, }, + { + name: "returns empty list when the ECS Cluster does not exist", + req: ListDeployedDatabaseServicesRequest{ + Integration: "my-integration", + TeleportClusterName: "my-cluster", + Region: "us-east-1", + }, + mockClient: func() *mockListECSClient { + ret := &mockListECSClient{ + pageSize: 10, + } + return ret + }, + respCheck: func(t *testing.T, resp *ListDeployedDatabaseServicesResponse) { + require.Empty(t, resp.DeployedDatabaseServices, "expected 0 services") + require.Empty(t, resp.NextToken, "expected an empty NextToken") + }, + errCheck: require.NoError, + }, } { t.Run(tt.name, func(t *testing.T) { resp, err := ListDeployedDatabaseServices(ctx, tt.mockClient(), tt.req) From d413d32aaee760ce20786752f4cfa03d66f060a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Cie=C5=9Blak?= Date: Thu, 9 Jan 2025 10:47:04 +0100 Subject: [PATCH 22/45] Allow Icon to receive ref (#50816) * Replace JSX.Element with React.ReactNode * Allow Icon to receive ref * Use a simpler example in the WithRef story --- package.json | 2 +- web/packages/design/src/Icon/Icon.story.tsx | 51 ++++++++++++++ web/packages/design/src/Icon/Icon.tsx | 67 ++++++++++--------- web/packages/design/src/Icon/Icons/Add.tsx | 18 +++-- .../design/src/Icon/Icons/AddCircle.tsx | 11 +-- 
.../design/src/Icon/Icons/AddUsers.tsx | 11 +-- .../design/src/Icon/Icons/AlarmRing.tsx | 11 +-- .../design/src/Icon/Icons/AmazonAws.tsx | 11 +-- .../design/src/Icon/Icons/Apartment.tsx | 11 +-- web/packages/design/src/Icon/Icons/Apple.tsx | 18 +++-- .../design/src/Icon/Icons/Application.tsx | 11 +-- .../design/src/Icon/Icons/Archive.tsx | 11 +-- .../design/src/Icon/Icons/ArrowBack.tsx | 11 +-- .../design/src/Icon/Icons/ArrowDown.tsx | 11 +-- .../design/src/Icon/Icons/ArrowFatLinesUp.tsx | 15 ++--- .../design/src/Icon/Icons/ArrowForward.tsx | 11 +-- .../design/src/Icon/Icons/ArrowLeft.tsx | 11 +-- .../design/src/Icon/Icons/ArrowLineLeft.tsx | 11 +-- .../design/src/Icon/Icons/ArrowRight.tsx | 11 +-- .../design/src/Icon/Icons/ArrowSquareOut.tsx | 11 +-- .../design/src/Icon/Icons/ArrowUp.tsx | 11 +-- .../design/src/Icon/Icons/ArrowsIn.tsx | 11 +-- .../design/src/Icon/Icons/ArrowsOut.tsx | 11 +-- .../design/src/Icon/Icons/BellRinging.tsx | 11 +-- .../design/src/Icon/Icons/BookOpenText.tsx | 11 +-- web/packages/design/src/Icon/Icons/Bots.tsx | 18 +++-- .../design/src/Icon/Icons/Broadcast.tsx | 11 +-- .../design/src/Icon/Icons/BroadcastSlash.tsx | 11 +-- web/packages/design/src/Icon/Icons/Bubble.tsx | 11 +-- web/packages/design/src/Icon/Icons/CCAmex.tsx | 11 +-- .../design/src/Icon/Icons/CCDiscover.tsx | 11 +-- .../design/src/Icon/Icons/CCMasterCard.tsx | 11 +-- .../design/src/Icon/Icons/CCStripe.tsx | 11 +-- web/packages/design/src/Icon/Icons/CCVisa.tsx | 11 +-- .../design/src/Icon/Icons/Calendar.tsx | 11 +-- web/packages/design/src/Icon/Icons/Camera.tsx | 11 +-- .../design/src/Icon/Icons/CardView.tsx | 11 +-- web/packages/design/src/Icon/Icons/Cash.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Chart.tsx | 18 +++-- .../design/src/Icon/Icons/ChatBubble.tsx | 11 +-- .../src/Icon/Icons/ChatCircleSparkle.tsx | 15 ++--- web/packages/design/src/Icon/Icons/Check.tsx | 18 +++-- .../design/src/Icon/Icons/CheckThick.tsx | 11 +-- web/packages/design/src/Icon/Icons/Checks.tsx | 
11 +-- .../src/Icon/Icons/ChevronCircleDown.tsx | 15 ++--- .../src/Icon/Icons/ChevronCircleLeft.tsx | 15 ++--- .../src/Icon/Icons/ChevronCircleRight.tsx | 15 ++--- .../design/src/Icon/Icons/ChevronCircleUp.tsx | 15 ++--- .../design/src/Icon/Icons/ChevronDown.tsx | 11 +-- .../design/src/Icon/Icons/ChevronLeft.tsx | 11 +-- .../design/src/Icon/Icons/ChevronRight.tsx | 11 +-- .../design/src/Icon/Icons/ChevronUp.tsx | 11 +-- .../src/Icon/Icons/ChevronsVertical.tsx | 15 ++--- .../design/src/Icon/Icons/CircleArrowLeft.tsx | 15 ++--- .../src/Icon/Icons/CircleArrowRight.tsx | 15 ++--- .../design/src/Icon/Icons/CircleCheck.tsx | 11 +-- .../design/src/Icon/Icons/CircleCross.tsx | 11 +-- .../design/src/Icon/Icons/CirclePause.tsx | 11 +-- .../design/src/Icon/Icons/CirclePlay.tsx | 11 +-- .../design/src/Icon/Icons/CircleStop.tsx | 11 +-- web/packages/design/src/Icon/Icons/Cli.tsx | 18 +++-- .../design/src/Icon/Icons/Clipboard.tsx | 11 +-- .../design/src/Icon/Icons/ClipboardUser.tsx | 11 +-- web/packages/design/src/Icon/Icons/Clock.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Cloud.tsx | 18 +++-- .../design/src/Icon/Icons/Cluster.tsx | 11 +-- web/packages/design/src/Icon/Icons/Code.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Cog.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Config.tsx | 11 +-- .../design/src/Icon/Icons/Contract.tsx | 11 +-- web/packages/design/src/Icon/Icons/Copy.tsx | 18 +++-- .../design/src/Icon/Icons/CreditCard.tsx | 11 +-- web/packages/design/src/Icon/Icons/Cross.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Crown.tsx | 18 +++-- .../design/src/Icon/Icons/Database.tsx | 11 +-- .../design/src/Icon/Icons/Desktop.tsx | 11 +-- .../src/Icon/Icons/DeviceMobileCamera.tsx | 15 ++--- .../design/src/Icon/Icons/Devices.tsx | 11 +-- .../design/src/Icon/Icons/Download.tsx | 11 +-- web/packages/design/src/Icon/Icons/Earth.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Edit.tsx | 18 +++-- .../design/src/Icon/Icons/Ellipsis.tsx | 11 +-- 
.../design/src/Icon/Icons/EmailSolid.tsx | 11 +-- .../design/src/Icon/Icons/EnvelopeOpen.tsx | 11 +-- .../src/Icon/Icons/EqualizersVertical.tsx | 15 ++--- web/packages/design/src/Icon/Icons/Expand.tsx | 11 +-- .../design/src/Icon/Icons/Facebook.tsx | 11 +-- .../src/Icon/Icons/FingerprintSimple.tsx | 15 ++--- web/packages/design/src/Icon/Icons/Floppy.tsx | 11 +-- .../design/src/Icon/Icons/FlowArrow.tsx | 11 +-- .../design/src/Icon/Icons/FolderPlus.tsx | 11 +-- .../design/src/Icon/Icons/FolderShared.tsx | 11 +-- web/packages/design/src/Icon/Icons/GitHub.tsx | 11 +-- web/packages/design/src/Icon/Icons/Google.tsx | 11 +-- web/packages/design/src/Icon/Icons/Graph.tsx | 18 +++-- .../design/src/Icon/Icons/Hashtag.tsx | 11 +-- .../design/src/Icon/Icons/Headset.tsx | 11 +-- web/packages/design/src/Icon/Icons/Home.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Info.tsx | 18 +++-- .../design/src/Icon/Icons/Integrations.tsx | 11 +-- .../design/src/Icon/Icons/Invoices.tsx | 11 +-- web/packages/design/src/Icon/Icons/Key.tsx | 18 +++-- .../design/src/Icon/Icons/KeyHole.tsx | 11 +-- .../design/src/Icon/Icons/Keyboard.tsx | 11 +-- .../design/src/Icon/Icons/Keypair.tsx | 11 +-- .../design/src/Icon/Icons/Kubernetes.tsx | 11 +-- web/packages/design/src/Icon/Icons/Label.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Lan.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Laptop.tsx | 11 +-- web/packages/design/src/Icon/Icons/Layout.tsx | 11 +-- .../design/src/Icon/Icons/License.tsx | 11 +-- .../design/src/Icon/Icons/LineSegment.tsx | 11 +-- .../design/src/Icon/Icons/LineSegments.tsx | 11 +-- web/packages/design/src/Icon/Icons/Link.tsx | 18 +++-- .../design/src/Icon/Icons/Linkedin.tsx | 11 +-- web/packages/design/src/Icon/Icons/Linux.tsx | 18 +++-- .../design/src/Icon/Icons/ListAddCheck.tsx | 11 +-- .../src/Icon/Icons/ListMagnifyingGlass.tsx | 15 ++--- .../design/src/Icon/Icons/ListThin.tsx | 11 +-- .../design/src/Icon/Icons/ListView.tsx | 11 +-- 
web/packages/design/src/Icon/Icons/Lock.tsx | 18 +++-- .../design/src/Icon/Icons/LockKey.tsx | 11 +-- web/packages/design/src/Icon/Icons/Logout.tsx | 11 +-- .../design/src/Icon/Icons/Magnifier.tsx | 11 +-- .../design/src/Icon/Icons/MagnifyingMinus.tsx | 15 ++--- .../design/src/Icon/Icons/MagnifyingPlus.tsx | 11 +-- web/packages/design/src/Icon/Icons/Memory.tsx | 11 +-- web/packages/design/src/Icon/Icons/Minus.tsx | 18 +++-- .../design/src/Icon/Icons/MinusCircle.tsx | 11 +-- web/packages/design/src/Icon/Icons/Moon.tsx | 18 +++-- .../design/src/Icon/Icons/MoreHoriz.tsx | 11 +-- .../design/src/Icon/Icons/MoreVert.tsx | 11 +-- web/packages/design/src/Icon/Icons/Mute.tsx | 18 +++-- web/packages/design/src/Icon/Icons/NewTab.tsx | 11 +-- .../design/src/Icon/Icons/NoteAdded.tsx | 11 +-- .../design/src/Icon/Icons/Notification.tsx | 11 +-- .../src/Icon/Icons/NotificationsActive.tsx | 15 ++--- .../design/src/Icon/Icons/PaperPlane.tsx | 11 +-- .../design/src/Icon/Icons/Password.tsx | 11 +-- web/packages/design/src/Icon/Icons/Pencil.tsx | 11 +-- web/packages/design/src/Icon/Icons/Planet.tsx | 11 +-- web/packages/design/src/Icon/Icons/Plugs.tsx | 18 +++-- .../design/src/Icon/Icons/PlugsConnected.tsx | 11 +-- web/packages/design/src/Icon/Icons/Plus.tsx | 18 +++-- .../design/src/Icon/Icons/PowerSwitch.tsx | 11 +-- .../design/src/Icon/Icons/Printer.tsx | 11 +-- .../design/src/Icon/Icons/Profile.tsx | 11 +-- .../design/src/Icon/Icons/PushPin.tsx | 11 +-- .../design/src/Icon/Icons/PushPinFilled.tsx | 11 +-- .../design/src/Icon/Icons/Question.tsx | 11 +-- .../design/src/Icon/Icons/Refresh.tsx | 11 +-- .../design/src/Icon/Icons/Restore.tsx | 11 +-- .../design/src/Icon/Icons/RocketLaunch.tsx | 11 +-- web/packages/design/src/Icon/Icons/Rows.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Ruler.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Run.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Scan.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Server.tsx | 11 +-- 
web/packages/design/src/Icon/Icons/Share.tsx | 18 +++-- .../design/src/Icon/Icons/ShieldCheck.tsx | 11 +-- .../design/src/Icon/Icons/ShieldWarning.tsx | 11 +-- .../design/src/Icon/Icons/Sliders.tsx | 11 +-- .../design/src/Icon/Icons/SlidersVertical.tsx | 15 ++--- web/packages/design/src/Icon/Icons/Speed.tsx | 18 +++-- .../design/src/Icon/Icons/Spinner.tsx | 11 +-- .../design/src/Icon/Icons/SquaresFour.tsx | 11 +-- web/packages/design/src/Icon/Icons/Stars.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Sun.tsx | 18 +++-- .../design/src/Icon/Icons/SyncAlt.tsx | 11 +-- web/packages/design/src/Icon/Icons/Table.tsx | 18 +++-- web/packages/design/src/Icon/Icons/Tablet.tsx | 11 +-- web/packages/design/src/Icon/Icons/Tags.tsx | 18 +++-- .../design/src/Icon/Icons/Terminal.tsx | 11 +-- web/packages/design/src/Icon/Icons/Trash.tsx | 18 +++-- .../design/src/Icon/Icons/Twitter.tsx | 11 +-- .../design/src/Icon/Icons/Unarchive.tsx | 11 +-- web/packages/design/src/Icon/Icons/Unlink.tsx | 11 +-- web/packages/design/src/Icon/Icons/Unlock.tsx | 11 +-- web/packages/design/src/Icon/Icons/Upload.tsx | 11 +-- .../design/src/Icon/Icons/UsbDrive.tsx | 11 +-- web/packages/design/src/Icon/Icons/User.tsx | 18 +++-- .../design/src/Icon/Icons/UserAdd.tsx | 11 +-- .../design/src/Icon/Icons/UserCircleGear.tsx | 11 +-- .../design/src/Icon/Icons/UserFocus.tsx | 11 +-- .../design/src/Icon/Icons/UserIdBadge.tsx | 11 +-- .../design/src/Icon/Icons/UserList.tsx | 11 +-- web/packages/design/src/Icon/Icons/Users.tsx | 18 +++-- .../design/src/Icon/Icons/UsersTriple.tsx | 11 +-- web/packages/design/src/Icon/Icons/Vault.tsx | 18 +++-- .../design/src/Icon/Icons/VideoGame.tsx | 11 +-- .../design/src/Icon/Icons/VolumeUp.tsx | 11 +-- web/packages/design/src/Icon/Icons/VpnKey.tsx | 11 +-- web/packages/design/src/Icon/Icons/Wand.tsx | 18 +++-- .../design/src/Icon/Icons/Warning.tsx | 11 +-- .../design/src/Icon/Icons/WarningCircle.tsx | 11 +-- web/packages/design/src/Icon/Icons/Wifi.tsx | 18 +++-- 
.../design/src/Icon/Icons/Windows.tsx | 11 +-- web/packages/design/src/Icon/Icons/Wrench.tsx | 11 +-- .../design/src/Icon/Icons/Youtube.tsx | 11 +-- .../design/src/Icon/script/IconTemplate.txt | 18 +++-- .../design/src/SVGIcon/SvgIcon.story.tsx | 6 +- .../teleport/src/Navigation/RecentHistory.tsx | 6 +- .../SideNavigation/CategoryIcon.tsx | 4 +- .../Navigation/SideNavigation/Navigation.tsx | 3 +- .../src/Sessions/SessionList/SessionList.tsx | 3 +- .../teleport/src/TopBar/DeviceTrustIcon.tsx | 3 +- web/packages/teleport/src/TopBar/TopBar.tsx | 4 +- .../src/components/TabIcon/TabIcon.tsx | 3 +- web/packages/teleport/src/types.ts | 4 +- 209 files changed, 1770 insertions(+), 946 deletions(-) create mode 100644 web/packages/design/src/Icon/Icon.story.tsx diff --git a/package.json b/package.json index e8b5ab7246759..a5485c04dc1b4 100644 --- a/package.json +++ b/package.json @@ -24,7 +24,7 @@ "type-check": "NODE_OPTIONS='--max-old-space-size=4096' tsc --build", "prettier-check": "prettier --check '+(e|web)/**/*.{ts,tsx,js,jsx,mts}'", "prettier-write": "prettier --write --log-level silent '+(e|web)/**/*.{ts,tsx,js,jsx,mts}'", - "process-icons": "node web/packages/design/src/Icon/script/script.js & pnpm prettier --loglevel silent --write 'web/packages/design/src/Icon/Icons/*.tsx'", + "process-icons": "node web/packages/design/src/Icon/script/script.js & pnpm prettier --log-level silent --write 'web/packages/design/src/Icon/**/*.tsx'", "nop": "exit 0" }, "private": true, diff --git a/web/packages/design/src/Icon/Icon.story.tsx b/web/packages/design/src/Icon/Icon.story.tsx new file mode 100644 index 0000000000000..87377283a0869 --- /dev/null +++ b/web/packages/design/src/Icon/Icon.story.tsx @@ -0,0 +1,51 @@ +/** + * Teleport + * Copyright (C) 2025 Gravitational, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +import { useEffect, useRef } from 'react'; +import styled from 'styled-components'; + +import { Flex } from 'design'; +import { blink } from 'design/keyframes'; + +import { Broadcast } from './Icons/Broadcast'; + +export default { + // Nest stories under Icon/Icon, so that Icon/Icons which lists all icons is the first story. + title: 'Design/Icon/Icon', +}; + +export const WithRef = () => { + const nodeRef = useRef(null); + + useEffect(() => { + nodeRef.current?.scrollIntoView({ block: 'center' }); + }, []); + + return ( + +
+ +

On the first render, the view should be scrolled to the icon.

+
+
+ ); +}; + +const StyledBroadcast = styled(Broadcast)` + animation: ${blink} 1s ease-in-out infinite; +`; diff --git a/web/packages/design/src/Icon/Icon.tsx b/web/packages/design/src/Icon/Icon.tsx index 511bc47e93dc8..5d62c65cd5748 100644 --- a/web/packages/design/src/Icon/Icon.tsx +++ b/web/packages/design/src/Icon/Icon.tsx @@ -16,42 +16,40 @@ * along with this program. If not, see . */ -import React, { PropsWithChildren } from 'react'; +import React, { ForwardedRef, forwardRef, PropsWithChildren } from 'react'; import styled from 'styled-components'; import { borderRadius, color, space, SpaceProps } from 'design/system'; -export function Icon({ - size = 'medium', - children, - ...otherProps -}: PropsWithChildren) { - let iconSize = size; - if (size === 'small') { - iconSize = 16; +export const Icon = forwardRef>( + ({ size = 'medium', children, ...otherProps }, ref) => { + let iconSize = size; + if (size === 'small') { + iconSize = 16; + } + if (size === 'medium') { + iconSize = 20; + } + if (size === 'large') { + iconSize = 24; + } + if (size === 'extra-large') { + iconSize = 32; + } + return ( + + + {children} + + + ); } - if (size === 'medium') { - iconSize = 20; - } - if (size === 'large') { - iconSize = 24; - } - if (size === 'extra-large') { - iconSize = 32; - } - return ( - - - {children} - - - ); -} +); const StyledIcon = styled.span` display: inline-flex; @@ -65,6 +63,9 @@ const StyledIcon = styled.span` export type IconSize = 'small' | 'medium' | 'large' | 'extra-large' | number; +/** + * IconProps are used in each autogenerated icon component. + */ export type IconProps = SpaceProps & { size?: IconSize; color?: string; @@ -79,7 +80,11 @@ export type IconProps = SpaceProps & { className?: string; }; +/** + * Props are used on the Icon component, but not in autogenerated icon components. 
+ */ type Props = IconProps & { children?: React.SVGProps | React.SVGProps[]; a?: any; + ref?: ForwardedRef; }; diff --git a/web/packages/design/src/Icon/Icons/Add.tsx b/web/packages/design/src/Icon/Icons/Add.tsx index 67a9d045f54bf..5c6ed4a2f4055 100644 --- a/web/packages/design/src/Icon/Icons/Add.tsx +++ b/web/packages/design/src/Icon/Icons/Add.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,10 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Add({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Add = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/AddCircle.tsx b/web/packages/design/src/Icon/Icons/AddCircle.tsx index a45873ed1cdc5..05a61f6a7d4cd 100644 --- a/web/packages/design/src/Icon/Icons/AddCircle.tsx +++ b/web/packages/design/src/Icon/Icons/AddCircle.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function AddCircle({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const AddCircle = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/AddUsers.tsx b/web/packages/design/src/Icon/Icons/AddUsers.tsx index 4daa30af03e9e..291d1107fd1e0 100644 --- a/web/packages/design/src/Icon/Icons/AddUsers.tsx +++ b/web/packages/design/src/Icon/Icons/AddUsers.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function AddUsers({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const AddUsers = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/AlarmRing.tsx b/web/packages/design/src/Icon/Icons/AlarmRing.tsx index 3a281b895a7a2..74c7d81373360 100644 --- a/web/packages/design/src/Icon/Icons/AlarmRing.tsx +++ b/web/packages/design/src/Icon/Icons/AlarmRing.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function AlarmRing({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const AlarmRing = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -65,5 +68,5 @@ export function AlarmRing({ size = 24, color, ...otherProps }: IconProps) { /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/AmazonAws.tsx b/web/packages/design/src/Icon/Icons/AmazonAws.tsx index 2a7d1c3d25c93..0b95de05e7b49 100644 --- a/web/packages/design/src/Icon/Icons/AmazonAws.tsx +++ b/web/packages/design/src/Icon/Icons/AmazonAws.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function AmazonAws({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const AmazonAws = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Apartment.tsx b/web/packages/design/src/Icon/Icons/Apartment.tsx index c7f94a1dd130f..dbacc005e643f 100644 --- a/web/packages/design/src/Icon/Icons/Apartment.tsx +++ b/web/packages/design/src/Icon/Icons/Apartment.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Apartment({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Apartment = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Apple.tsx b/web/packages/design/src/Icon/Icons/Apple.tsx index 1646852494d26..111db50141e78 100644 --- a/web/packages/design/src/Icon/Icons/Apple.tsx +++ b/web/packages/design/src/Icon/Icons/Apple.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Apple({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Apple = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Application.tsx b/web/packages/design/src/Icon/Icons/Application.tsx index 8463784e06942..d4170c24141d0 100644 --- a/web/packages/design/src/Icon/Icons/Application.tsx +++ b/web/packages/design/src/Icon/Icons/Application.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Application({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Application = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function Application({ size = 24, color, ...otherProps }: IconProps) { d="M2.25 5.25C2.25 4.42157 2.92157 3.75 3.75 3.75H20.25C21.0784 3.75 21.75 4.42157 21.75 5.25V18.75C21.75 19.5784 21.0784 20.25 20.25 20.25H3.75C2.92157 20.25 2.25 19.5784 2.25 18.75V5.25ZM20.25 5.25H3.75V18.75H20.25V5.25Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Archive.tsx b/web/packages/design/src/Icon/Icons/Archive.tsx index bb0d928fa815b..338a6a76d63b0 100644 --- a/web/packages/design/src/Icon/Icons/Archive.tsx +++ b/web/packages/design/src/Icon/Icons/Archive.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Archive({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Archive = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowBack.tsx b/web/packages/design/src/Icon/Icons/ArrowBack.tsx index 0de01b394a7f4..3c083daee01fc 100644 --- a/web/packages/design/src/Icon/Icons/ArrowBack.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowBack.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ArrowBack({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowBack = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowDown.tsx b/web/packages/design/src/Icon/Icons/ArrowDown.tsx index 907178413e8f6..185cdece515c9 100644 --- a/web/packages/design/src/Icon/Icons/ArrowDown.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowDown.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowDown({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowDown = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowFatLinesUp.tsx b/web/packages/design/src/Icon/Icons/ArrowFatLinesUp.tsx index 90f912bc95c73..45caeec4808d1 100644 --- a/web/packages/design/src/Icon/Icons/ArrowFatLinesUp.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowFatLinesUp.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowFatLinesUp({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ArrowFatLinesUp = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowForward.tsx b/web/packages/design/src/Icon/Icons/ArrowForward.tsx index 50c9157d56b48..0453d29fc5774 100644 --- a/web/packages/design/src/Icon/Icons/ArrowForward.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowForward.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ArrowForward({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowForward = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowLeft.tsx b/web/packages/design/src/Icon/Icons/ArrowLeft.tsx index a7496ae93920f..7546e06847736 100644 --- a/web/packages/design/src/Icon/Icons/ArrowLeft.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowLeft.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowLeft({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowLeft = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowLineLeft.tsx b/web/packages/design/src/Icon/Icons/ArrowLineLeft.tsx index 8eceb19b95985..b37403bc1694d 100644 --- a/web/packages/design/src/Icon/Icons/ArrowLineLeft.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowLineLeft.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowLineLeft({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowLineLeft = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowRight.tsx b/web/packages/design/src/Icon/Icons/ArrowRight.tsx index 3ac8846d4a811..53f102b391611 100644 --- a/web/packages/design/src/Icon/Icons/ArrowRight.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowRight.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ArrowRight({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowRight = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowSquareOut.tsx b/web/packages/design/src/Icon/Icons/ArrowSquareOut.tsx index 309810dcea743..b8237a4b8d252 100644 --- a/web/packages/design/src/Icon/Icons/ArrowSquareOut.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowSquareOut.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowSquareOut({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowSquareOut = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowUp.tsx b/web/packages/design/src/Icon/Icons/ArrowUp.tsx index b932ea5886975..348883c5c463d 100644 --- a/web/packages/design/src/Icon/Icons/ArrowUp.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowUp.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowUp({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowUp = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowsIn.tsx b/web/packages/design/src/Icon/Icons/ArrowsIn.tsx index d5e54b7bbf996..e07e1dff510cb 100644 --- a/web/packages/design/src/Icon/Icons/ArrowsIn.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowsIn.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ArrowsIn({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowsIn = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ArrowsOut.tsx b/web/packages/design/src/Icon/Icons/ArrowsOut.tsx index 215beffb58f77..1aad6fa766e9f 100644 --- a/web/packages/design/src/Icon/Icons/ArrowsOut.tsx +++ b/web/packages/design/src/Icon/Icons/ArrowsOut.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ArrowsOut({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ArrowsOut = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/BellRinging.tsx b/web/packages/design/src/Icon/Icons/BellRinging.tsx index 97cea1e9e506f..3a54175c3f896 100644 --- a/web/packages/design/src/Icon/Icons/BellRinging.tsx +++ b/web/packages/design/src/Icon/Icons/BellRinging.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function BellRinging({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const BellRinging = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/BookOpenText.tsx b/web/packages/design/src/Icon/Icons/BookOpenText.tsx index aa86e0a5d0f18..390229125f213 100644 --- a/web/packages/design/src/Icon/Icons/BookOpenText.tsx +++ b/web/packages/design/src/Icon/Icons/BookOpenText.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function BookOpenText({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const BookOpenText = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -65,5 +68,5 @@ export function BookOpenText({ size = 24, color, ...otherProps }: IconProps) { d="M1.93934 3.93934C2.22064 3.65804 2.60217 3.5 3 3.5H9C9.99456 3.5 10.9484 3.89509 11.6517 4.59835C11.7779 4.72456 11.8941 4.85884 12 4.99998C12.1059 4.85884 12.2221 4.72456 12.3484 4.59835C13.0516 3.89509 14.0054 3.5 15 3.5H21C21.3978 3.5 21.7794 3.65804 22.0607 3.93934C22.342 4.22065 22.5 4.60218 22.5 5V17C22.5 17.3978 22.342 17.7794 22.0607 18.0607C21.7794 18.342 21.3978 18.5 21 18.5H15C14.4033 18.5 13.831 18.7371 13.409 19.159C12.9871 19.581 12.75 20.1533 12.75 20.75C12.75 21.1642 12.4142 21.5 12 21.5C11.5858 21.5 11.25 21.1642 11.25 20.75C11.25 20.1533 11.0129 19.581 10.591 19.159C10.169 18.7371 9.59674 18.5 9 18.5H3C2.60218 18.5 2.22065 18.342 1.93934 18.0607C1.65803 17.7794 1.5 17.3978 1.5 17V5C1.5 4.60218 1.65804 4.22064 1.93934 3.93934ZM12.75 17.75V7.25C12.75 6.65326 12.9871 6.08097 13.409 5.65901C13.831 5.23705 14.4033 5 15 5L21 5V17H15C14.1839 17 13.3953 17.266 12.75 17.75ZM10.591 5.65901C11.0129 6.08097 11.25 6.65326 11.25 7.25V17.75C10.6047 17.266 9.81606 17 9 17H3V5L9 5C9.59674 5 10.169 5.23705 10.591 5.65901Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Bots.tsx b/web/packages/design/src/Icon/Icons/Bots.tsx index d58983844689b..130d566fef584 100644 --- a/web/packages/design/src/Icon/Icons/Bots.tsx +++ b/web/packages/design/src/Icon/Icons/Bots.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Bots({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Bots = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Broadcast.tsx b/web/packages/design/src/Icon/Icons/Broadcast.tsx index 160e232e2f3b2..903cee7d5cf57 100644 --- a/web/packages/design/src/Icon/Icons/Broadcast.tsx +++ b/web/packages/design/src/Icon/Icons/Broadcast.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Broadcast({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Broadcast = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -66,5 +69,5 @@ export function Broadcast({ size = 24, color, ...otherProps }: IconProps) { d="M8.25 12C8.25 9.92893 9.92893 8.25 12 8.25C14.0711 8.25 15.75 9.92893 15.75 12C15.75 14.0711 14.0711 15.75 12 15.75C9.92893 15.75 8.25 14.0711 8.25 12ZM12 9.75C10.7574 9.75 9.75 10.7574 9.75 12C9.75 13.2426 10.7574 14.25 12 14.25C13.2426 14.25 14.25 13.2426 14.25 12C14.25 10.7574 13.2426 9.75 12 9.75Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/BroadcastSlash.tsx b/web/packages/design/src/Icon/Icons/BroadcastSlash.tsx index 9daee15d4bced..ab8e6327b4c29 100644 --- a/web/packages/design/src/Icon/Icons/BroadcastSlash.tsx +++ b/web/packages/design/src/Icon/Icons/BroadcastSlash.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function BroadcastSlash({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const BroadcastSlash = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Bubble.tsx b/web/packages/design/src/Icon/Icons/Bubble.tsx index c94ada7cff62e..b14de9605dff2 100644 --- a/web/packages/design/src/Icon/Icons/Bubble.tsx +++ b/web/packages/design/src/Icon/Icons/Bubble.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Bubble({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Bubble = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CCAmex.tsx b/web/packages/design/src/Icon/Icons/CCAmex.tsx index 542d55fc92e10..4a37623407611 100644 --- a/web/packages/design/src/Icon/Icons/CCAmex.tsx +++ b/web/packages/design/src/Icon/Icons/CCAmex.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CCAmex({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CCAmex = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CCDiscover.tsx b/web/packages/design/src/Icon/Icons/CCDiscover.tsx index 7667482ecc24c..7598078b826e7 100644 --- a/web/packages/design/src/Icon/Icons/CCDiscover.tsx +++ b/web/packages/design/src/Icon/Icons/CCDiscover.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function CCDiscover({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CCDiscover = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CCMasterCard.tsx b/web/packages/design/src/Icon/Icons/CCMasterCard.tsx index 436f1275d9755..ce752556192f8 100644 --- a/web/packages/design/src/Icon/Icons/CCMasterCard.tsx +++ b/web/packages/design/src/Icon/Icons/CCMasterCard.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CCMasterCard({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CCMasterCard = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CCStripe.tsx b/web/packages/design/src/Icon/Icons/CCStripe.tsx index b4ba6f3287ca1..98acf2536c0f8 100644 --- a/web/packages/design/src/Icon/Icons/CCStripe.tsx +++ b/web/packages/design/src/Icon/Icons/CCStripe.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CCStripe({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CCStripe = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CCVisa.tsx b/web/packages/design/src/Icon/Icons/CCVisa.tsx index 0d72543be8e63..bfd54fbfc1907 100644 --- a/web/packages/design/src/Icon/Icons/CCVisa.tsx +++ b/web/packages/design/src/Icon/Icons/CCVisa.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function CCVisa({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CCVisa = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Calendar.tsx b/web/packages/design/src/Icon/Icons/Calendar.tsx index 224ee96feacf9..7d8140bd01275 100644 --- a/web/packages/design/src/Icon/Icons/Calendar.tsx +++ b/web/packages/design/src/Icon/Icons/Calendar.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Calendar({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Calendar = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function Calendar({ size = 24, color, ...otherProps }: IconProps) { d="M17.25 2.25C17.25 1.83579 16.9142 1.5 16.5 1.5C16.0858 1.5 15.75 1.83579 15.75 2.25V3H8.25V2.25C8.25 1.83579 7.91421 1.5 7.5 1.5C7.08579 1.5 6.75 1.83579 6.75 2.25V3H4.5C3.67157 3 3 3.67157 3 4.5V19.5C3 20.3284 3.67157 21 4.5 21H19.5C20.3284 21 21 20.3284 21 19.5V4.5C21 3.67157 20.3284 3 19.5 3H17.25V2.25ZM19.5 4.5H17.25V5.25C17.25 5.66421 16.9142 6 16.5 6C16.0858 6 15.75 5.66421 15.75 5.25V4.5H8.25V5.25C8.25 5.66421 7.91421 6 7.5 6C7.08579 6 6.75 5.66421 6.75 5.25V4.5H4.5V7.5H19.5V4.5ZM4.5 9H19.5V19.5H4.5V9Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Camera.tsx b/web/packages/design/src/Icon/Icons/Camera.tsx index a8d7d728bbb09..bb781f825356c 100644 --- a/web/packages/design/src/Icon/Icons/Camera.tsx +++ b/web/packages/design/src/Icon/Icons/Camera.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Camera({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Camera = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CardView.tsx b/web/packages/design/src/Icon/Icons/CardView.tsx index ee0dcc238e8c9..0683805ae8db6 100644 --- a/web/packages/design/src/Icon/Icons/CardView.tsx +++ b/web/packages/design/src/Icon/Icons/CardView.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CardView({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CardView = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Cash.tsx b/web/packages/design/src/Icon/Icons/Cash.tsx index 83dd3b199412c..bdca4615dffa3 100644 --- a/web/packages/design/src/Icon/Icons/Cash.tsx +++ b/web/packages/design/src/Icon/Icons/Cash.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Cash({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Cash = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Chart.tsx b/web/packages/design/src/Icon/Icons/Chart.tsx index 4baecea557d5a..222e7224e658d 100644 --- a/web/packages/design/src/Icon/Icons/Chart.tsx +++ b/web/packages/design/src/Icon/Icons/Chart.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Chart({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Chart = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChatBubble.tsx b/web/packages/design/src/Icon/Icons/ChatBubble.tsx index ed809e95fc934..714f83e25421d 100644 --- a/web/packages/design/src/Icon/Icons/ChatBubble.tsx +++ b/web/packages/design/src/Icon/Icons/ChatBubble.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChatBubble({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ChatBubble = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChatCircleSparkle.tsx b/web/packages/design/src/Icon/Icons/ChatCircleSparkle.tsx index cd046fb281347..414ae583daaf1 100644 --- a/web/packages/design/src/Icon/Icons/ChatCircleSparkle.tsx +++ b/web/packages/design/src/Icon/Icons/ChatCircleSparkle.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChatCircleSparkle({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ChatCircleSparkle = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Check.tsx b/web/packages/design/src/Icon/Icons/Check.tsx index f1de7010e4f3f..e82758226213e 100644 --- a/web/packages/design/src/Icon/Icons/Check.tsx +++ b/web/packages/design/src/Icon/Icons/Check.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Check({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Check = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CheckThick.tsx b/web/packages/design/src/Icon/Icons/CheckThick.tsx index 486e3c7aa923f..96db8cf7b63b8 100644 --- a/web/packages/design/src/Icon/Icons/CheckThick.tsx +++ b/web/packages/design/src/Icon/Icons/CheckThick.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CheckThick({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CheckThick = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Checks.tsx b/web/packages/design/src/Icon/Icons/Checks.tsx index d954dc9280dbc..581b12db4226a 100644 --- a/web/packages/design/src/Icon/Icons/Checks.tsx +++ b/web/packages/design/src/Icon/Icons/Checks.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Checks({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Checks = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronCircleDown.tsx b/web/packages/design/src/Icon/Icons/ChevronCircleDown.tsx index 91a7c1f7a5b16..6379f8e59d941 100644 --- a/web/packages/design/src/Icon/Icons/ChevronCircleDown.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronCircleDown.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ChevronCircleDown({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ChevronCircleDown = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronCircleLeft.tsx b/web/packages/design/src/Icon/Icons/ChevronCircleLeft.tsx index 104a4df38804c..e996d8edd1d57 100644 --- a/web/packages/design/src/Icon/Icons/ChevronCircleLeft.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronCircleLeft.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronCircleLeft({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ChevronCircleLeft = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronCircleRight.tsx b/web/packages/design/src/Icon/Icons/ChevronCircleRight.tsx index 47ccf26a49971..4c3a86f8f2d2f 100644 --- a/web/packages/design/src/Icon/Icons/ChevronCircleRight.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronCircleRight.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronCircleRight({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ChevronCircleRight = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronCircleUp.tsx b/web/packages/design/src/Icon/Icons/ChevronCircleUp.tsx index f985f5e66c6a0..dc6367605f1d0 100644 --- a/web/packages/design/src/Icon/Icons/ChevronCircleUp.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronCircleUp.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronCircleUp({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ChevronCircleUp = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronDown.tsx b/web/packages/design/src/Icon/Icons/ChevronDown.tsx index c5af174bfec8e..907e78d1d6470 100644 --- a/web/packages/design/src/Icon/Icons/ChevronDown.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronDown.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronDown({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ChevronDown = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronLeft.tsx b/web/packages/design/src/Icon/Icons/ChevronLeft.tsx index 85b71f40eaadf..f50f5ff0faae5 100644 --- a/web/packages/design/src/Icon/Icons/ChevronLeft.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronLeft.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronLeft({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ChevronLeft = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronRight.tsx b/web/packages/design/src/Icon/Icons/ChevronRight.tsx index 7f4a6d0191a2d..fb1f999536beb 100644 --- a/web/packages/design/src/Icon/Icons/ChevronRight.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronRight.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronRight({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ChevronRight = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronUp.tsx b/web/packages/design/src/Icon/Icons/ChevronUp.tsx index c5fd2eb30803d..b68a8f49b9a99 100644 --- a/web/packages/design/src/Icon/Icons/ChevronUp.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronUp.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronUp({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ChevronUp = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ChevronsVertical.tsx b/web/packages/design/src/Icon/Icons/ChevronsVertical.tsx index 347ff0b8b9c08..af8c404deb649 100644 --- a/web/packages/design/src/Icon/Icons/ChevronsVertical.tsx +++ b/web/packages/design/src/Icon/Icons/ChevronsVertical.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,20 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ChevronsVertical({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ChevronsVertical = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CircleArrowLeft.tsx b/web/packages/design/src/Icon/Icons/CircleArrowLeft.tsx index 8012d3142b1a9..061067863c128 100644 --- a/web/packages/design/src/Icon/Icons/CircleArrowLeft.tsx +++ b/web/packages/design/src/Icon/Icons/CircleArrowLeft.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CircleArrowLeft({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const CircleArrowLeft = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CircleArrowRight.tsx b/web/packages/design/src/Icon/Icons/CircleArrowRight.tsx index 7001c924817cb..2a825ccd6a7eb 100644 --- a/web/packages/design/src/Icon/Icons/CircleArrowRight.tsx +++ b/web/packages/design/src/Icon/Icons/CircleArrowRight.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CircleArrowRight({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const CircleArrowRight = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CircleCheck.tsx b/web/packages/design/src/Icon/Icons/CircleCheck.tsx index 39db44d733a33..7225ede5a5cc3 100644 --- a/web/packages/design/src/Icon/Icons/CircleCheck.tsx +++ b/web/packages/design/src/Icon/Icons/CircleCheck.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CircleCheck({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CircleCheck = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CircleCross.tsx b/web/packages/design/src/Icon/Icons/CircleCross.tsx index aa3a24cb51552..0e3a30943ae79 100644 --- a/web/packages/design/src/Icon/Icons/CircleCross.tsx +++ b/web/packages/design/src/Icon/Icons/CircleCross.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CircleCross({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CircleCross = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CirclePause.tsx b/web/packages/design/src/Icon/Icons/CirclePause.tsx index 3150774b82681..fbec5ef543a86 100644 --- a/web/packages/design/src/Icon/Icons/CirclePause.tsx +++ b/web/packages/design/src/Icon/Icons/CirclePause.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CirclePause({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CirclePause = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function CirclePause({ size = 24, color, ...otherProps }: IconProps) { d="M2.25 12C2.25 6.61522 6.61522 2.25 12 2.25C17.3848 2.25 21.75 6.61522 21.75 12C21.75 17.3848 17.3848 21.75 12 21.75C6.61522 21.75 2.25 17.3848 2.25 12ZM12 3.75C7.44365 3.75 3.75 7.44365 3.75 12C3.75 16.5563 7.44365 20.25 12 20.25C16.5563 20.25 20.25 16.5563 20.25 12C20.25 7.44365 16.5563 3.75 12 3.75Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CirclePlay.tsx b/web/packages/design/src/Icon/Icons/CirclePlay.tsx index c28d3546b650d..869fe0ef99ff4 100644 --- a/web/packages/design/src/Icon/Icons/CirclePlay.tsx +++ b/web/packages/design/src/Icon/Icons/CirclePlay.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function CirclePlay({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CirclePlay = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CircleStop.tsx b/web/packages/design/src/Icon/Icons/CircleStop.tsx index efaaa00e62d01..48d7ef006875f 100644 --- a/web/packages/design/src/Icon/Icons/CircleStop.tsx +++ b/web/packages/design/src/Icon/Icons/CircleStop.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CircleStop({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CircleStop = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Cli.tsx b/web/packages/design/src/Icon/Icons/Cli.tsx index 7222ba641585e..84a6243942be5 100644 --- a/web/packages/design/src/Icon/Icons/Cli.tsx +++ b/web/packages/design/src/Icon/Icons/Cli.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Cli({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Cli = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Clipboard.tsx b/web/packages/design/src/Icon/Icons/Clipboard.tsx index b15961ca1cba0..a5f2bbf7ed6f2 100644 --- a/web/packages/design/src/Icon/Icons/Clipboard.tsx +++ b/web/packages/design/src/Icon/Icons/Clipboard.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Clipboard({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Clipboard = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function Clipboard({ size = 24, color, ...otherProps }: IconProps) { d="M8.64589 3C8.70142 2.93792 8.75881 2.87723 8.81802 2.81802C9.66193 1.97411 10.8065 1.5 12 1.5C13.1935 1.5 14.3381 1.97411 15.182 2.81802C15.2412 2.87723 15.2986 2.93792 15.3541 3H18.75C19.1478 3 19.5294 3.15804 19.8107 3.43934C20.092 3.72065 20.25 4.10218 20.25 4.5V20.25C20.25 20.6478 20.092 21.0294 19.8107 21.3107C19.5294 21.592 19.1478 21.75 18.75 21.75H5.25C4.85218 21.75 4.47065 21.592 4.18934 21.3107C3.90804 21.0294 3.75 20.6478 3.75 20.25V4.5C3.75 4.10217 3.90804 3.72064 4.18934 3.43934C4.47064 3.15804 4.85217 3 5.25 3H8.64589ZM9.87868 3.87868C10.4413 3.31607 11.2044 3 12 3C12.7956 3 13.5587 3.31607 14.1213 3.87868C14.2202 3.97758 14.3115 4.08267 14.3948 4.19305C14.398 4.19746 14.4013 4.20184 14.4046 4.20617C14.7889 4.72126 15 5.34975 15 6H9C9 5.34975 9.21111 4.72126 9.59537 4.20617C9.5987 4.20183 9.60198 4.19746 9.60521 4.19305C9.6885 4.08268 9.77978 3.97758 9.87868 3.87868ZM7.75736 4.5H5.25L5.25 20.25H18.75L18.75 4.5H16.2426C16.4114 4.97731 16.5 5.48409 16.5 6V6.75C16.5 7.16421 16.1642 7.5 15.75 7.5H8.25C7.83579 7.5 7.5 7.16421 7.5 6.75V6C7.5 5.48409 7.58859 4.97731 7.75736 4.5Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ClipboardUser.tsx b/web/packages/design/src/Icon/Icons/ClipboardUser.tsx index 367ab412e6c65..8568eed3fbac7 100644 --- a/web/packages/design/src/Icon/Icons/ClipboardUser.tsx +++ b/web/packages/design/src/Icon/Icons/ClipboardUser.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ClipboardUser({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ClipboardUser = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Clock.tsx b/web/packages/design/src/Icon/Icons/Clock.tsx index edcea59a432c7..b1e5f61c42b0a 100644 --- a/web/packages/design/src/Icon/Icons/Clock.tsx +++ b/web/packages/design/src/Icon/Icons/Clock.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Clock({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Clock = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Cloud.tsx b/web/packages/design/src/Icon/Icons/Cloud.tsx index a9a6177628e8e..15bada7610b11 100644 --- a/web/packages/design/src/Icon/Icons/Cloud.tsx +++ b/web/packages/design/src/Icon/Icons/Cloud.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Cloud({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Cloud = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Cluster.tsx b/web/packages/design/src/Icon/Icons/Cluster.tsx index 0401a992421c8..f9cecdf71b635 100644 --- a/web/packages/design/src/Icon/Icons/Cluster.tsx +++ b/web/packages/design/src/Icon/Icons/Cluster.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Cluster({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Cluster = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Code.tsx b/web/packages/design/src/Icon/Icons/Code.tsx index 60d05a8959e99..dc843fa40cd41 100644 --- a/web/packages/design/src/Icon/Icons/Code.tsx +++ b/web/packages/design/src/Icon/Icons/Code.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Code({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Code = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Cog.tsx b/web/packages/design/src/Icon/Icons/Cog.tsx index 9f1d1d4b2e5cd..78e3733610bb5 100644 --- a/web/packages/design/src/Icon/Icons/Cog.tsx +++ b/web/packages/design/src/Icon/Icons/Cog.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Cog({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Cog = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Config.tsx b/web/packages/design/src/Icon/Icons/Config.tsx index aaa2de7747b90..d58b54ab9af50 100644 --- a/web/packages/design/src/Icon/Icons/Config.tsx +++ b/web/packages/design/src/Icon/Icons/Config.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Config({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Config = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -68,5 +71,5 @@ export function Config({ size = 24, color, ...otherProps }: IconProps) { d="M2.25 3.9C2.25 3.12257 2.79934 2.25 3.75 2.25H20.25C21.2007 2.25 21.75 3.12257 21.75 3.9V20.1C21.75 20.8774 21.2007 21.75 20.25 21.75H3.75C2.79934 21.75 2.25 20.8774 2.25 20.1V3.9ZM3.79101 3.75C3.77346 3.77438 3.75 3.82494 3.75 3.9V20.1C3.75 20.1751 3.77346 20.2256 3.79101 20.25H20.209C20.2265 20.2256 20.25 20.1751 20.25 20.1V3.9C20.25 3.82494 20.2265 3.77438 20.209 3.75H3.79101Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Contract.tsx b/web/packages/design/src/Icon/Icons/Contract.tsx index 7a900edf4bf0b..e2c4263e20c32 100644 --- a/web/packages/design/src/Icon/Icons/Contract.tsx +++ b/web/packages/design/src/Icon/Icons/Contract.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Contract({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Contract = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Copy.tsx b/web/packages/design/src/Icon/Icons/Copy.tsx index a27280c09fb7c..5647e8e260c53 100644 --- a/web/packages/design/src/Icon/Icons/Copy.tsx +++ b/web/packages/design/src/Icon/Icons/Copy.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Copy({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Copy = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/CreditCard.tsx b/web/packages/design/src/Icon/Icons/CreditCard.tsx index 5911ddc3140d0..feab4fbd36da2 100644 --- a/web/packages/design/src/Icon/Icons/CreditCard.tsx +++ b/web/packages/design/src/Icon/Icons/CreditCard.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function CreditCard({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const CreditCard = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Cross.tsx b/web/packages/design/src/Icon/Icons/Cross.tsx index dc9545e4e1bb0..a7d33893527b4 100644 --- a/web/packages/design/src/Icon/Icons/Cross.tsx +++ b/web/packages/design/src/Icon/Icons/Cross.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,10 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Cross({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Cross = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Crown.tsx b/web/packages/design/src/Icon/Icons/Crown.tsx index 4d6ccb97d9d43..a77187a9e3dbd 100644 --- a/web/packages/design/src/Icon/Icons/Crown.tsx +++ b/web/packages/design/src/Icon/Icons/Crown.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Crown({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Crown = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Database.tsx b/web/packages/design/src/Icon/Icons/Database.tsx index 7c0b1a2b14be6..793293815064b 100644 --- a/web/packages/design/src/Icon/Icons/Database.tsx +++ b/web/packages/design/src/Icon/Icons/Database.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Database({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Database = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Desktop.tsx b/web/packages/design/src/Icon/Icons/Desktop.tsx index 5ecdbec38b5a2..7e15189e5d7eb 100644 --- a/web/packages/design/src/Icon/Icons/Desktop.tsx +++ b/web/packages/design/src/Icon/Icons/Desktop.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Desktop({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Desktop = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/DeviceMobileCamera.tsx b/web/packages/design/src/Icon/Icons/DeviceMobileCamera.tsx index a1a0b628aaec1..a518902d4b9a2 100644 --- a/web/packages/design/src/Icon/Icons/DeviceMobileCamera.tsx +++ b/web/packages/design/src/Icon/Icons/DeviceMobileCamera.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function DeviceMobileCamera({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const DeviceMobileCamera = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Devices.tsx b/web/packages/design/src/Icon/Icons/Devices.tsx index dd4f6037b9fc0..8ce202e259c01 100644 --- a/web/packages/design/src/Icon/Icons/Devices.tsx +++ b/web/packages/design/src/Icon/Icons/Devices.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Devices({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Devices = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Download.tsx b/web/packages/design/src/Icon/Icons/Download.tsx index 388b84d2fbed9..c75abbeda14f5 100644 --- a/web/packages/design/src/Icon/Icons/Download.tsx +++ b/web/packages/design/src/Icon/Icons/Download.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Download({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Download = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Earth.tsx b/web/packages/design/src/Icon/Icons/Earth.tsx index 65bb7e21a4d2a..9d56b8350f9c4 100644 --- a/web/packages/design/src/Icon/Icons/Earth.tsx +++ b/web/packages/design/src/Icon/Icons/Earth.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Earth({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Earth = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Edit.tsx b/web/packages/design/src/Icon/Icons/Edit.tsx index 57b0303d2c1ab..2d832f7e6021a 100644 --- a/web/packages/design/src/Icon/Icons/Edit.tsx +++ b/web/packages/design/src/Icon/Icons/Edit.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Edit({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Edit = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Ellipsis.tsx b/web/packages/design/src/Icon/Icons/Ellipsis.tsx index fc4c96918955f..22ef42e8fc652 100644 --- a/web/packages/design/src/Icon/Icons/Ellipsis.tsx +++ b/web/packages/design/src/Icon/Icons/Ellipsis.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Ellipsis({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Ellipsis = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/EmailSolid.tsx b/web/packages/design/src/Icon/Icons/EmailSolid.tsx index 5360af236538e..00c4e28260939 100644 --- a/web/packages/design/src/Icon/Icons/EmailSolid.tsx +++ b/web/packages/design/src/Icon/Icons/EmailSolid.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function EmailSolid({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const EmailSolid = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/EnvelopeOpen.tsx b/web/packages/design/src/Icon/Icons/EnvelopeOpen.tsx index 0b9793cff885f..d6897f6ab90f8 100644 --- a/web/packages/design/src/Icon/Icons/EnvelopeOpen.tsx +++ b/web/packages/design/src/Icon/Icons/EnvelopeOpen.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function EnvelopeOpen({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const EnvelopeOpen = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/EqualizersVertical.tsx b/web/packages/design/src/Icon/Icons/EqualizersVertical.tsx index 540da0de4a764..0885164e6ffa7 100644 --- a/web/packages/design/src/Icon/Icons/EqualizersVertical.tsx +++ b/web/packages/design/src/Icon/Icons/EqualizersVertical.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function EqualizersVertical({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const EqualizersVertical = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -67,5 +66,5 @@ export function EqualizersVertical({ - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Expand.tsx b/web/packages/design/src/Icon/Icons/Expand.tsx index 780b626486e1f..e1e9bc86467b0 100644 --- a/web/packages/design/src/Icon/Icons/Expand.tsx +++ b/web/packages/design/src/Icon/Icons/Expand.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Expand({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Expand = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Facebook.tsx b/web/packages/design/src/Icon/Icons/Facebook.tsx index 68321b7d1c714..f2b33bfac090a 100644 --- a/web/packages/design/src/Icon/Icons/Facebook.tsx +++ b/web/packages/design/src/Icon/Icons/Facebook.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Facebook({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Facebook = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/FingerprintSimple.tsx b/web/packages/design/src/Icon/Icons/FingerprintSimple.tsx index b26fc3eae42c3..1740858bc27bc 100644 --- a/web/packages/design/src/Icon/Icons/FingerprintSimple.tsx +++ b/web/packages/design/src/Icon/Icons/FingerprintSimple.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function FingerprintSimple({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const FingerprintSimple = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Floppy.tsx b/web/packages/design/src/Icon/Icons/Floppy.tsx index 3d4e535e94360..b128d539d0eeb 100644 --- a/web/packages/design/src/Icon/Icons/Floppy.tsx +++ b/web/packages/design/src/Icon/Icons/Floppy.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Floppy({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Floppy = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/FlowArrow.tsx b/web/packages/design/src/Icon/Icons/FlowArrow.tsx index 28b67f46ead22..67c80917c0037 100644 --- a/web/packages/design/src/Icon/Icons/FlowArrow.tsx +++ b/web/packages/design/src/Icon/Icons/FlowArrow.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function FlowArrow({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const FlowArrow = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/FolderPlus.tsx b/web/packages/design/src/Icon/Icons/FolderPlus.tsx index 7aaa6cc4e0ae4..7560cfdbcc767 100644 --- a/web/packages/design/src/Icon/Icons/FolderPlus.tsx +++ b/web/packages/design/src/Icon/Icons/FolderPlus.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function FolderPlus({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const FolderPlus = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/FolderShared.tsx b/web/packages/design/src/Icon/Icons/FolderShared.tsx index bebbf3a5ea0e4..fa68d3bf9a6cc 100644 --- a/web/packages/design/src/Icon/Icons/FolderShared.tsx +++ b/web/packages/design/src/Icon/Icons/FolderShared.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function FolderShared({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const FolderShared = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/GitHub.tsx b/web/packages/design/src/Icon/Icons/GitHub.tsx index a691b48c92190..e319923a82f22 100644 --- a/web/packages/design/src/Icon/Icons/GitHub.tsx +++ b/web/packages/design/src/Icon/Icons/GitHub.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function GitHub({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const GitHub = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Google.tsx b/web/packages/design/src/Icon/Icons/Google.tsx index 9cde7f9a26e8c..0c6f4b7cee93b 100644 --- a/web/packages/design/src/Icon/Icons/Google.tsx +++ b/web/packages/design/src/Icon/Icons/Google.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Google({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Google = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Graph.tsx b/web/packages/design/src/Icon/Icons/Graph.tsx index 18cd6b75b8193..abdbf3e0b02c7 100644 --- a/web/packages/design/src/Icon/Icons/Graph.tsx +++ b/web/packages/design/src/Icon/Icons/Graph.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,10 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Graph({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Graph = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Hashtag.tsx b/web/packages/design/src/Icon/Icons/Hashtag.tsx index 51edfdc8c9790..b27a05bdb76b4 100644 --- a/web/packages/design/src/Icon/Icons/Hashtag.tsx +++ b/web/packages/design/src/Icon/Icons/Hashtag.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Hashtag({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Hashtag = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Headset.tsx b/web/packages/design/src/Icon/Icons/Headset.tsx index 8441b61f5d17b..77bfc140bb755 100644 --- a/web/packages/design/src/Icon/Icons/Headset.tsx +++ b/web/packages/design/src/Icon/Icons/Headset.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Headset({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Headset = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Home.tsx b/web/packages/design/src/Icon/Icons/Home.tsx index 6bc2ad3fac9cc..56c30696ade72 100644 --- a/web/packages/design/src/Icon/Icons/Home.tsx +++ b/web/packages/design/src/Icon/Icons/Home.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Home({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Home = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Info.tsx b/web/packages/design/src/Icon/Icons/Info.tsx index 2c2a05b0c084c..94502c5afef96 100644 --- a/web/packages/design/src/Icon/Icons/Info.tsx +++ b/web/packages/design/src/Icon/Icons/Info.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Info({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Info = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Integrations.tsx b/web/packages/design/src/Icon/Icons/Integrations.tsx index 336e8c08db40f..97b6e93061d28 100644 --- a/web/packages/design/src/Icon/Icons/Integrations.tsx +++ b/web/packages/design/src/Icon/Icons/Integrations.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Integrations({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Integrations = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Invoices.tsx b/web/packages/design/src/Icon/Icons/Invoices.tsx index c655cd3a02999..155dde536a928 100644 --- a/web/packages/design/src/Icon/Icons/Invoices.tsx +++ b/web/packages/design/src/Icon/Icons/Invoices.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Invoices({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Invoices = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function Invoices({ size = 24, color, ...otherProps }: IconProps) { d="M3.75 3.75C3.35217 3.75 2.97064 3.90804 2.68934 4.18934C2.40804 4.47064 2.25 4.85217 2.25 5.25V19.5C2.25 19.7599 2.38459 20.0013 2.6057 20.138C2.82681 20.2746 3.10292 20.2871 3.33541 20.1708L6 18.8385L8.66459 20.1708C8.87574 20.2764 9.12426 20.2764 9.33541 20.1708L12 18.8385L14.6646 20.1708C14.8757 20.2764 15.1243 20.2764 15.3354 20.1708L18 18.8385L20.6646 20.1708C20.8971 20.2871 21.1732 20.2746 21.3943 20.138C21.6154 20.0013 21.75 19.7599 21.75 19.5V5.25C21.75 4.85218 21.592 4.47065 21.3107 4.18934C21.0294 3.90804 20.6478 3.75 20.25 3.75H3.75ZM3.75 5.25L20.25 5.25V18.2865L18.3354 17.3292C18.1243 17.2236 17.8757 17.2236 17.6646 17.3292L15 18.6615L12.3354 17.3292C12.1243 17.2236 11.8757 17.2236 11.6646 17.3292L9 18.6615L6.33541 17.3292C6.12426 17.2236 5.87574 17.2236 5.66459 17.3292L3.75 18.2865L3.75 5.25Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Key.tsx b/web/packages/design/src/Icon/Icons/Key.tsx index 914dec7b3ffb1..06594fdb5cd41 100644 --- a/web/packages/design/src/Icon/Icons/Key.tsx +++ b/web/packages/design/src/Icon/Icons/Key.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Key({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Key = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/KeyHole.tsx b/web/packages/design/src/Icon/Icons/KeyHole.tsx index d4de448b1c294..ad645ee5bd63e 100644 --- a/web/packages/design/src/Icon/Icons/KeyHole.tsx +++ b/web/packages/design/src/Icon/Icons/KeyHole.tsx @@ -40,6 +40,8 @@ SOFTWARE. 
*/ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function KeyHole({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const KeyHole = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Keyboard.tsx b/web/packages/design/src/Icon/Icons/Keyboard.tsx index 83d4f855aa940..a82b738caf021 100644 --- a/web/packages/design/src/Icon/Icons/Keyboard.tsx +++ b/web/packages/design/src/Icon/Icons/Keyboard.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Keyboard({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Keyboard = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -67,5 +70,5 @@ export function Keyboard({ size = 24, color, ...otherProps }: IconProps) { d="M3.04594 4.5C2.19214 4.5 1.5 5.19214 1.5 6.04594V17.9541C1.5 18.8079 2.19214 19.5 3.04594 19.5H20.9541C21.8079 19.5 22.5 18.8079 22.5 17.9541V6.04594C22.5 5.19214 21.8079 4.5 20.9541 4.5H3.04594ZM3 6.04594C3 6.02057 3.02057 6 3.04594 6H20.9541C20.9794 6 21 6.02057 21 6.04594V17.9541C21 17.9794 20.9794 18 20.9541 18H3.04594C3.02057 18 3 17.9794 3 17.9541V6.04594Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Keypair.tsx b/web/packages/design/src/Icon/Icons/Keypair.tsx index 03b6b47d21c0d..d332e546d9c54 100644 --- a/web/packages/design/src/Icon/Icons/Keypair.tsx +++ b/web/packages/design/src/Icon/Icons/Keypair.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Keypair({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Keypair = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Kubernetes.tsx b/web/packages/design/src/Icon/Icons/Kubernetes.tsx index 5d437e3c335e8..e9fea277e12cd 100644 --- a/web/packages/design/src/Icon/Icons/Kubernetes.tsx +++ b/web/packages/design/src/Icon/Icons/Kubernetes.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Kubernetes({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Kubernetes = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Label.tsx b/web/packages/design/src/Icon/Icons/Label.tsx index 30d15b93a18a4..bf9b18ecab4a3 100644 --- a/web/packages/design/src/Icon/Icons/Label.tsx +++ b/web/packages/design/src/Icon/Icons/Label.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Label({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Label = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Lan.tsx b/web/packages/design/src/Icon/Icons/Lan.tsx index b096de016591c..04f2dc8465353 100644 --- a/web/packages/design/src/Icon/Icons/Lan.tsx +++ b/web/packages/design/src/Icon/Icons/Lan.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Lan({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Lan = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Laptop.tsx b/web/packages/design/src/Icon/Icons/Laptop.tsx index 19677af6c9213..6fbdc4360e141 100644 --- a/web/packages/design/src/Icon/Icons/Laptop.tsx +++ b/web/packages/design/src/Icon/Icons/Laptop.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Laptop({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Laptop = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Layout.tsx b/web/packages/design/src/Icon/Icons/Layout.tsx index d5c1852b80a21..a9ed1825bd0fb 100644 --- a/web/packages/design/src/Icon/Icons/Layout.tsx +++ b/web/packages/design/src/Icon/Icons/Layout.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Layout({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Layout = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/License.tsx b/web/packages/design/src/Icon/Icons/License.tsx index 5e97fa4095a64..ccdac10d91f37 100644 --- a/web/packages/design/src/Icon/Icons/License.tsx +++ b/web/packages/design/src/Icon/Icons/License.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function License({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const License = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/LineSegment.tsx b/web/packages/design/src/Icon/Icons/LineSegment.tsx index d3bf34cdfd31c..a181febb58075 100644 --- a/web/packages/design/src/Icon/Icons/LineSegment.tsx +++ b/web/packages/design/src/Icon/Icons/LineSegment.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function LineSegment({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const LineSegment = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/LineSegments.tsx b/web/packages/design/src/Icon/Icons/LineSegments.tsx index 4651cfa5974f8..4b1b2e4661853 100644 --- a/web/packages/design/src/Icon/Icons/LineSegments.tsx +++ b/web/packages/design/src/Icon/Icons/LineSegments.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function LineSegments({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const LineSegments = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Link.tsx b/web/packages/design/src/Icon/Icons/Link.tsx index de4c3ff1e6458..d20f71ab92765 100644 --- a/web/packages/design/src/Icon/Icons/Link.tsx +++ b/web/packages/design/src/Icon/Icons/Link.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,11 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Link({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Link = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Linkedin.tsx b/web/packages/design/src/Icon/Icons/Linkedin.tsx index ca886d25738f0..2ce67c7542812 100644 --- a/web/packages/design/src/Icon/Icons/Linkedin.tsx +++ b/web/packages/design/src/Icon/Icons/Linkedin.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Linkedin({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Linkedin = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -65,5 +68,5 @@ export function Linkedin({ size = 24, color, ...otherProps }: IconProps) { d="M2.25 3.75C2.25 2.92157 2.92157 2.25 3.75 2.25H20.25C21.0784 2.25 21.75 2.92157 21.75 3.75V20.25C21.75 21.0784 21.0784 21.75 20.25 21.75H3.75C2.92157 21.75 2.25 21.0784 2.25 20.25V3.75ZM20.25 3.75H3.75V20.25H20.25V3.75Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Linux.tsx b/web/packages/design/src/Icon/Icons/Linux.tsx index 6ba83d5480907..a6a392b11e7ac 100644 --- a/web/packages/design/src/Icon/Icons/Linux.tsx +++ b/web/packages/design/src/Icon/Icons/Linux.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Linux({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Linux = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ListAddCheck.tsx b/web/packages/design/src/Icon/Icons/ListAddCheck.tsx index 4434b2c203909..ff6051a33c432 100644 --- a/web/packages/design/src/Icon/Icons/ListAddCheck.tsx +++ b/web/packages/design/src/Icon/Icons/ListAddCheck.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ListAddCheck({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ListAddCheck = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -63,5 +66,5 @@ export function ListAddCheck({ size = 24, color, ...otherProps }: IconProps) { - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ListMagnifyingGlass.tsx b/web/packages/design/src/Icon/Icons/ListMagnifyingGlass.tsx index 4442a2fef2634..fb877475f6257 100644 --- a/web/packages/design/src/Icon/Icons/ListMagnifyingGlass.tsx +++ b/web/packages/design/src/Icon/Icons/ListMagnifyingGlass.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ListMagnifyingGlass({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const ListMagnifyingGlass = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -69,5 +68,5 @@ export function ListMagnifyingGlass({ d="M13.5 13.5C13.5 11.4289 15.1789 9.75 17.25 9.75C19.3211 9.75 21 11.4289 21 13.5C21 14.2642 20.7714 14.975 20.3789 15.5677L22.2803 17.4692C22.5732 17.7621 22.5732 18.237 22.2803 18.5299C21.9875 18.8228 21.5126 18.8228 21.2197 18.5299L19.3183 16.6285C18.7255 17.0213 18.0144 17.25 17.25 17.25C15.1789 17.25 13.5 15.5711 13.5 13.5ZM17.25 11.25C16.0074 11.25 15 12.2574 15 13.5C15 14.7426 16.0074 15.75 17.25 15.75C18.4926 15.75 19.5 14.7426 19.5 13.5C19.5 12.2574 18.4926 11.25 17.25 11.25Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ListThin.tsx b/web/packages/design/src/Icon/Icons/ListThin.tsx index b36f4f0d3cf9d..c6e8f676120f8 100644 --- a/web/packages/design/src/Icon/Icons/ListThin.tsx +++ b/web/packages/design/src/Icon/Icons/ListThin.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ListThin({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ListThin = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -63,5 +66,5 @@ export function ListThin({ size = 24, color, ...otherProps }: IconProps) { - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ListView.tsx b/web/packages/design/src/Icon/Icons/ListView.tsx index 89c936890a154..4187301593382 100644 --- a/web/packages/design/src/Icon/Icons/ListView.tsx +++ b/web/packages/design/src/Icon/Icons/ListView.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ListView({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ListView = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -63,5 +66,5 @@ export function ListView({ size = 24, color, ...otherProps }: IconProps) { - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Lock.tsx b/web/packages/design/src/Icon/Icons/Lock.tsx index 3923fc952f4ce..b03e85dcbda03 100644 --- a/web/packages/design/src/Icon/Icons/Lock.tsx +++ b/web/packages/design/src/Icon/Icons/Lock.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Lock({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Lock = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/LockKey.tsx b/web/packages/design/src/Icon/Icons/LockKey.tsx index a3efdab311c60..65b4043c5bcf5 100644 --- a/web/packages/design/src/Icon/Icons/LockKey.tsx +++ b/web/packages/design/src/Icon/Icons/LockKey.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function LockKey({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const LockKey = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Logout.tsx b/web/packages/design/src/Icon/Icons/Logout.tsx index fdffb67bf2e7d..fac3041b344a7 100644 --- a/web/packages/design/src/Icon/Icons/Logout.tsx +++ b/web/packages/design/src/Icon/Icons/Logout.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Logout({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Logout = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Magnifier.tsx b/web/packages/design/src/Icon/Icons/Magnifier.tsx index f6cf426ed470e..79655e11a879e 100644 --- a/web/packages/design/src/Icon/Icons/Magnifier.tsx +++ b/web/packages/design/src/Icon/Icons/Magnifier.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Magnifier({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Magnifier = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/MagnifyingMinus.tsx b/web/packages/design/src/Icon/Icons/MagnifyingMinus.tsx index 5919e5d2ea3be..067bd5da684aa 100644 --- a/web/packages/design/src/Icon/Icons/MagnifyingMinus.tsx +++ b/web/packages/design/src/Icon/Icons/MagnifyingMinus.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,22 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function MagnifyingMinus({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const MagnifyingMinus = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/MagnifyingPlus.tsx b/web/packages/design/src/Icon/Icons/MagnifyingPlus.tsx index 802f6bd222e98..f55daafef1397 100644 --- a/web/packages/design/src/Icon/Icons/MagnifyingPlus.tsx +++ b/web/packages/design/src/Icon/Icons/MagnifyingPlus.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,18 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function MagnifyingPlus({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const MagnifyingPlus = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Memory.tsx b/web/packages/design/src/Icon/Icons/Memory.tsx index 2ada215529af7..8eb153a6ae295 100644 --- a/web/packages/design/src/Icon/Icons/Memory.tsx +++ b/web/packages/design/src/Icon/Icons/Memory.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Memory({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Memory = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Minus.tsx b/web/packages/design/src/Icon/Icons/Minus.tsx index e8b0afa4e6e72..97727d7854100 100644 --- a/web/packages/design/src/Icon/Icons/Minus.tsx +++ b/web/packages/design/src/Icon/Icons/Minus.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Minus({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Minus = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/MinusCircle.tsx b/web/packages/design/src/Icon/Icons/MinusCircle.tsx index fe3a181ffd912..11b3d787bae04 100644 --- a/web/packages/design/src/Icon/Icons/MinusCircle.tsx +++ b/web/packages/design/src/Icon/Icons/MinusCircle.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function MinusCircle({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const MinusCircle = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Moon.tsx b/web/packages/design/src/Icon/Icons/Moon.tsx index 06b5340f80bd6..f6e169b4ae512 100644 --- a/web/packages/design/src/Icon/Icons/Moon.tsx +++ b/web/packages/design/src/Icon/Icons/Moon.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Moon({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Moon = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/MoreHoriz.tsx b/web/packages/design/src/Icon/Icons/MoreHoriz.tsx index e9695c13d7548..0bf7676adae6b 100644 --- a/web/packages/design/src/Icon/Icons/MoreHoriz.tsx +++ b/web/packages/design/src/Icon/Icons/MoreHoriz.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,18 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function MoreHoriz({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const MoreHoriz = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/MoreVert.tsx b/web/packages/design/src/Icon/Icons/MoreVert.tsx index 7803bf267c4d1..8a3856fcc7dbe 100644 --- a/web/packages/design/src/Icon/Icons/MoreVert.tsx +++ b/web/packages/design/src/Icon/Icons/MoreVert.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,18 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function MoreVert({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const MoreVert = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Mute.tsx b/web/packages/design/src/Icon/Icons/Mute.tsx index f7f29176f5306..ee416e148204c 100644 --- a/web/packages/design/src/Icon/Icons/Mute.tsx +++ b/web/packages/design/src/Icon/Icons/Mute.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Mute({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Mute = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/NewTab.tsx b/web/packages/design/src/Icon/Icons/NewTab.tsx index dc54aca3184b8..6f45612a8f486 100644 --- a/web/packages/design/src/Icon/Icons/NewTab.tsx +++ b/web/packages/design/src/Icon/Icons/NewTab.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function NewTab({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const NewTab = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/NoteAdded.tsx b/web/packages/design/src/Icon/Icons/NoteAdded.tsx index 615c027a59d99..b0d27da43eb6b 100644 --- a/web/packages/design/src/Icon/Icons/NoteAdded.tsx +++ b/web/packages/design/src/Icon/Icons/NoteAdded.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function NoteAdded({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const NoteAdded = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Notification.tsx b/web/packages/design/src/Icon/Icons/Notification.tsx index 8525bdb063edc..46e2747a18167 100644 --- a/web/packages/design/src/Icon/Icons/Notification.tsx +++ b/web/packages/design/src/Icon/Icons/Notification.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Notification({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Notification = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/NotificationsActive.tsx b/web/packages/design/src/Icon/Icons/NotificationsActive.tsx index f2e17823b55f4..1dc75203c3169 100644 --- a/web/packages/design/src/Icon/Icons/NotificationsActive.tsx +++ b/web/packages/design/src/Icon/Icons/NotificationsActive.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function NotificationsActive({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const NotificationsActive = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/PaperPlane.tsx b/web/packages/design/src/Icon/Icons/PaperPlane.tsx index da7149819be11..8d1129da5d3f7 100644 --- a/web/packages/design/src/Icon/Icons/PaperPlane.tsx +++ b/web/packages/design/src/Icon/Icons/PaperPlane.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function PaperPlane({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const PaperPlane = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Password.tsx b/web/packages/design/src/Icon/Icons/Password.tsx index 58c61e55fee4b..581b73f8d4324 100644 --- a/web/packages/design/src/Icon/Icons/Password.tsx +++ b/web/packages/design/src/Icon/Icons/Password.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,18 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Password({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Password = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Pencil.tsx b/web/packages/design/src/Icon/Icons/Pencil.tsx index f5365bdb8f34a..e3662f001f29c 100644 --- a/web/packages/design/src/Icon/Icons/Pencil.tsx +++ b/web/packages/design/src/Icon/Icons/Pencil.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Pencil({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Pencil = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Planet.tsx b/web/packages/design/src/Icon/Icons/Planet.tsx index 094830e362e69..886b8f17b68ef 100644 --- a/web/packages/design/src/Icon/Icons/Planet.tsx +++ b/web/packages/design/src/Icon/Icons/Planet.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Planet({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Planet = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Plugs.tsx b/web/packages/design/src/Icon/Icons/Plugs.tsx index 85a0bd37e2524..cfeab94172415 100644 --- a/web/packages/design/src/Icon/Icons/Plugs.tsx +++ b/web/packages/design/src/Icon/Icons/Plugs.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Plugs({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Plugs = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/PlugsConnected.tsx b/web/packages/design/src/Icon/Icons/PlugsConnected.tsx index c5ac04e1bc50a..9e8158669cc8f 100644 --- a/web/packages/design/src/Icon/Icons/PlugsConnected.tsx +++ b/web/packages/design/src/Icon/Icons/PlugsConnected.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function PlugsConnected({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const PlugsConnected = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Plus.tsx b/web/packages/design/src/Icon/Icons/Plus.tsx index 34e2fa1c6b449..7047e72bc4f08 100644 --- a/web/packages/design/src/Icon/Icons/Plus.tsx +++ b/web/packages/design/src/Icon/Icons/Plus.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,10 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Plus({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Plus = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/PowerSwitch.tsx b/web/packages/design/src/Icon/Icons/PowerSwitch.tsx index 67fc1a91675ca..c020f964c482b 100644 --- a/web/packages/design/src/Icon/Icons/PowerSwitch.tsx +++ b/web/packages/design/src/Icon/Icons/PowerSwitch.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function PowerSwitch({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const PowerSwitch = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Printer.tsx b/web/packages/design/src/Icon/Icons/Printer.tsx index 3d21ae29d3ccf..f60d3e8a05c6d 100644 --- a/web/packages/design/src/Icon/Icons/Printer.tsx +++ b/web/packages/design/src/Icon/Icons/Printer.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Printer({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Printer = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Profile.tsx b/web/packages/design/src/Icon/Icons/Profile.tsx index 889c4816c6e77..333340d0ecf15 100644 --- a/web/packages/design/src/Icon/Icons/Profile.tsx +++ b/web/packages/design/src/Icon/Icons/Profile.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Profile({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Profile = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -69,5 +72,5 @@ export function Profile({ size = 24, color, ...otherProps }: IconProps) { d="M3.75 3.75C2.92157 3.75 2.25 4.42157 2.25 5.25V18.75C2.25 19.5784 2.92157 20.25 3.75 20.25H20.25C21.0784 20.25 21.75 19.5784 21.75 18.75V5.25C21.75 4.42157 21.0784 3.75 20.25 3.75H3.75ZM3.75 5.25H20.25V18.75H3.75V5.25Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/PushPin.tsx b/web/packages/design/src/Icon/Icons/PushPin.tsx index 025f6cccc1638..75923a7e06592 100644 --- a/web/packages/design/src/Icon/Icons/PushPin.tsx +++ b/web/packages/design/src/Icon/Icons/PushPin.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function PushPin({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const PushPin = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/PushPinFilled.tsx b/web/packages/design/src/Icon/Icons/PushPinFilled.tsx index 18d06de5014de..e1c7faa378d68 100644 --- a/web/packages/design/src/Icon/Icons/PushPinFilled.tsx +++ b/web/packages/design/src/Icon/Icons/PushPinFilled.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function PushPinFilled({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const PushPinFilled = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Question.tsx b/web/packages/design/src/Icon/Icons/Question.tsx index 6596b7ae58790..1c6cde3a1d45c 100644 --- a/web/packages/design/src/Icon/Icons/Question.tsx +++ b/web/packages/design/src/Icon/Icons/Question.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Question({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Question = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function Question({ size = 24, color, ...otherProps }: IconProps) { d="M12 2.25C6.61522 2.25 2.25 6.61522 2.25 12C2.25 17.3848 6.61522 21.75 12 21.75C17.3848 21.75 21.75 17.3848 21.75 12C21.75 6.61522 17.3848 2.25 12 2.25ZM3.75 12C3.75 7.44365 7.44365 3.75 12 3.75C16.5563 3.75 20.25 7.44365 20.25 12C20.25 16.5563 16.5563 20.25 12 20.25C7.44365 20.25 3.75 16.5563 3.75 12Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Refresh.tsx b/web/packages/design/src/Icon/Icons/Refresh.tsx index 04e7a78e54682..a69d03c9ecdca 100644 --- a/web/packages/design/src/Icon/Icons/Refresh.tsx +++ b/web/packages/design/src/Icon/Icons/Refresh.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,15 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Refresh({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Refresh = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Restore.tsx b/web/packages/design/src/Icon/Icons/Restore.tsx index 9ebf98fec530f..92b9e6532d2d4 100644 --- a/web/packages/design/src/Icon/Icons/Restore.tsx +++ b/web/packages/design/src/Icon/Icons/Restore.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Restore({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Restore = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/RocketLaunch.tsx b/web/packages/design/src/Icon/Icons/RocketLaunch.tsx index 500dc5d066453..018328c2848b0 100644 --- a/web/packages/design/src/Icon/Icons/RocketLaunch.tsx +++ b/web/packages/design/src/Icon/Icons/RocketLaunch.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function RocketLaunch({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const RocketLaunch = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Rows.tsx b/web/packages/design/src/Icon/Icons/Rows.tsx index 7abbbbe61f1c1..7b6d34f119846 100644 --- a/web/packages/design/src/Icon/Icons/Rows.tsx +++ b/web/packages/design/src/Icon/Icons/Rows.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Rows({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Rows = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Ruler.tsx b/web/packages/design/src/Icon/Icons/Ruler.tsx index 6385435788595..3b9a433db7af9 100644 --- a/web/packages/design/src/Icon/Icons/Ruler.tsx +++ b/web/packages/design/src/Icon/Icons/Ruler.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Ruler({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Ruler = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Run.tsx b/web/packages/design/src/Icon/Icons/Run.tsx index 1849f30733505..c8151a8c4c28c 100644 --- a/web/packages/design/src/Icon/Icons/Run.tsx +++ b/web/packages/design/src/Icon/Icons/Run.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Run({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Run = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Scan.tsx b/web/packages/design/src/Icon/Icons/Scan.tsx index 3958fcb89978f..1e18137fe68d2 100644 --- a/web/packages/design/src/Icon/Icons/Scan.tsx +++ b/web/packages/design/src/Icon/Icons/Scan.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Scan({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Scan = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Server.tsx b/web/packages/design/src/Icon/Icons/Server.tsx index 4cf45cde553c3..b07e3d373b459 100644 --- a/web/packages/design/src/Icon/Icons/Server.tsx +++ b/web/packages/design/src/Icon/Icons/Server.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Server({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Server = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Share.tsx b/web/packages/design/src/Icon/Icons/Share.tsx index 1d3ee4fc17e86..525bc3e8e312f 100644 --- a/web/packages/design/src/Icon/Icons/Share.tsx +++ b/web/packages/design/src/Icon/Icons/Share.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Share({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Share = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ShieldCheck.tsx b/web/packages/design/src/Icon/Icons/ShieldCheck.tsx index 81e0e07c06784..3972e434d1d95 100644 --- a/web/packages/design/src/Icon/Icons/ShieldCheck.tsx +++ b/web/packages/design/src/Icon/Icons/ShieldCheck.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function ShieldCheck({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ShieldCheck = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/ShieldWarning.tsx b/web/packages/design/src/Icon/Icons/ShieldWarning.tsx index efe86555542ae..b8a1c10c32e4c 100644 --- a/web/packages/design/src/Icon/Icons/ShieldWarning.tsx +++ b/web/packages/design/src/Icon/Icons/ShieldWarning.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function ShieldWarning({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const ShieldWarning = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function ShieldWarning({ size = 24, color, ...otherProps }: IconProps) { d="M3.43934 4.18934C3.72064 3.90804 4.10217 3.75 4.5 3.75H19.5C19.8978 3.75 20.2794 3.90804 20.5607 4.18934C20.842 4.47065 21 4.85218 21 5.25V10.7597C21 19.1792 13.8537 21.9599 12.4706 22.4203C12.1656 22.5244 11.8348 22.5244 11.5298 22.4204C10.1444 21.9611 3 19.1829 3 10.7616V5.25C3 4.85217 3.15804 4.47064 3.43934 4.18934ZM19.5 5.25L4.5 5.25L4.5 10.7616C4.5 18.112 10.6965 20.5635 12 20.996C13.3064 20.5606 19.5 18.1058 19.5 10.7597V5.25Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Sliders.tsx b/web/packages/design/src/Icon/Icons/Sliders.tsx index 29d00763a60db..b6e0d96106c65 100644 --- a/web/packages/design/src/Icon/Icons/Sliders.tsx +++ b/web/packages/design/src/Icon/Icons/Sliders.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Sliders({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Sliders = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/SlidersVertical.tsx b/web/packages/design/src/Icon/Icons/SlidersVertical.tsx index 1d5516cea0881..4f2fd67ddcf19 100644 --- a/web/packages/design/src/Icon/Icons/SlidersVertical.tsx +++ b/web/packages/design/src/Icon/Icons/SlidersVertical.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,17 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function SlidersVertical({ - size = 24, - color, - ...otherProps -}: IconProps) { - return ( +export const SlidersVertical = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Speed.tsx b/web/packages/design/src/Icon/Icons/Speed.tsx index b13e226a9ae67..62316da943096 100644 --- a/web/packages/design/src/Icon/Icons/Speed.tsx +++ b/web/packages/design/src/Icon/Icons/Speed.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Speed({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Speed = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Spinner.tsx b/web/packages/design/src/Icon/Icons/Spinner.tsx index a2531e04bfa83..b659dc396ccc3 100644 --- a/web/packages/design/src/Icon/Icons/Spinner.tsx +++ b/web/packages/design/src/Icon/Icons/Spinner.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Spinner({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Spinner = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/SquaresFour.tsx b/web/packages/design/src/Icon/Icons/SquaresFour.tsx index f92dee14796cb..43a44c4841bc2 100644 --- a/web/packages/design/src/Icon/Icons/SquaresFour.tsx +++ b/web/packages/design/src/Icon/Icons/SquaresFour.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function SquaresFour({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const SquaresFour = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Stars.tsx b/web/packages/design/src/Icon/Icons/Stars.tsx index e2a3772248c42..4017a951bd74a 100644 --- a/web/packages/design/src/Icon/Icons/Stars.tsx +++ b/web/packages/design/src/Icon/Icons/Stars.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Stars({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Stars = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Sun.tsx b/web/packages/design/src/Icon/Icons/Sun.tsx index 5d6a010667989..f813885be84f2 100644 --- a/web/packages/design/src/Icon/Icons/Sun.tsx +++ b/web/packages/design/src/Icon/Icons/Sun.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Sun({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Sun = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/SyncAlt.tsx b/web/packages/design/src/Icon/Icons/SyncAlt.tsx index ef97c1bbf8bd5..22b5755d2f84a 100644 --- a/web/packages/design/src/Icon/Icons/SyncAlt.tsx +++ b/web/packages/design/src/Icon/Icons/SyncAlt.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function SyncAlt({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const SyncAlt = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Table.tsx b/web/packages/design/src/Icon/Icons/Table.tsx index eeb8c2c490a2e..8a67dfdaecc70 100644 --- a/web/packages/design/src/Icon/Icons/Table.tsx +++ b/web/packages/design/src/Icon/Icons/Table.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Table({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Table = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Tablet.tsx b/web/packages/design/src/Icon/Icons/Tablet.tsx index 19bc87a1b4588..97e2de34ec170 100644 --- a/web/packages/design/src/Icon/Icons/Tablet.tsx +++ b/web/packages/design/src/Icon/Icons/Tablet.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Tablet({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Tablet = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Tags.tsx b/web/packages/design/src/Icon/Icons/Tags.tsx index 5eda6492f1d3c..24fcd68ab51b7 100644 --- a/web/packages/design/src/Icon/Icons/Tags.tsx +++ b/web/packages/design/src/Icon/Icons/Tags.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Tags({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Tags = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Terminal.tsx b/web/packages/design/src/Icon/Icons/Terminal.tsx index d97b57aafad5f..bcbf267204420 100644 --- a/web/packages/design/src/Icon/Icons/Terminal.tsx +++ b/web/packages/design/src/Icon/Icons/Terminal.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Terminal({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Terminal = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Trash.tsx b/web/packages/design/src/Icon/Icons/Trash.tsx index 91cc228a8905b..680b7d4e88b12 100644 --- a/web/packages/design/src/Icon/Icons/Trash.tsx +++ b/web/packages/design/src/Icon/Icons/Trash.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Trash({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Trash = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Twitter.tsx b/web/packages/design/src/Icon/Icons/Twitter.tsx index 534beaa7f8a95..f69a0902f30a3 100644 --- a/web/packages/design/src/Icon/Icons/Twitter.tsx +++ b/web/packages/design/src/Icon/Icons/Twitter.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Twitter({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Twitter = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Unarchive.tsx b/web/packages/design/src/Icon/Icons/Unarchive.tsx index 9a94dcb3efe58..db582586399d4 100644 --- a/web/packages/design/src/Icon/Icons/Unarchive.tsx +++ b/web/packages/design/src/Icon/Icons/Unarchive.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Unarchive({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Unarchive = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Unlink.tsx b/web/packages/design/src/Icon/Icons/Unlink.tsx index b37625602faf0..4e4b6ed8d5f51 100644 --- a/web/packages/design/src/Icon/Icons/Unlink.tsx +++ b/web/packages/design/src/Icon/Icons/Unlink.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Unlink({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Unlink = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -63,5 +66,5 @@ export function Unlink({ size = 24, color, ...otherProps }: IconProps) { - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Unlock.tsx b/web/packages/design/src/Icon/Icons/Unlock.tsx index 204504d9fe7ba..6f3b94037c557 100644 --- a/web/packages/design/src/Icon/Icons/Unlock.tsx +++ b/web/packages/design/src/Icon/Icons/Unlock.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Unlock({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Unlock = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Upload.tsx b/web/packages/design/src/Icon/Icons/Upload.tsx index 03b2672311be8..1cf22e0f4e46d 100644 --- a/web/packages/design/src/Icon/Icons/Upload.tsx +++ b/web/packages/design/src/Icon/Icons/Upload.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,16 +50,17 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Upload({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Upload = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UsbDrive.tsx b/web/packages/design/src/Icon/Icons/UsbDrive.tsx index 92edd134e15da..edc1394598921 100644 --- a/web/packages/design/src/Icon/Icons/UsbDrive.tsx +++ b/web/packages/design/src/Icon/Icons/UsbDrive.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function UsbDrive({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UsbDrive = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function UsbDrive({ size = 24, color, ...otherProps }: IconProps) { d="M15.4782 2.59067L11.6295 6.43932C10.738 6.32356 9.8045 6.60808 9.11968 7.29289L2.96022 13.4524C1.78865 14.6239 1.78865 16.5234 2.96022 17.695L6.30388 21.0386C7.47545 22.2102 9.37494 22.2102 10.5465 21.0386L16.706 14.8792C17.3255 14.2596 17.6174 13.4365 17.5817 12.6252L21.5472 8.65971C22.3282 7.87866 22.3282 6.61233 21.5472 5.83128L18.3066 2.59067C17.5255 1.80962 16.2592 1.80962 15.4782 2.59067ZM20.4865 6.89194L17.2459 3.65133C17.0507 3.45607 16.7341 3.45607 16.5388 3.65133L13.1212 7.06898L13.1612 7.10897C13.2303 7.16668 13.2974 7.22799 13.3623 7.29289L16.706 10.6365C16.7709 10.7014 16.8322 10.7686 16.8899 10.8377L17.0689 11.0167L20.4865 7.59905C20.6818 7.40378 20.6818 7.0872 20.4865 6.89194ZM10.1803 8.35355C10.7201 7.8138 11.5689 7.77138 12.1572 8.22631L15.7726 11.8417C16.2275 12.43 16.1851 13.2788 15.6453 13.8185L9.48586 19.978C8.90007 20.5638 7.95032 20.5638 7.36454 19.978L4.02088 16.6343C3.4351 16.0485 3.4351 15.0988 4.02088 14.513L10.1803 8.35355Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/User.tsx b/web/packages/design/src/Icon/Icons/User.tsx index 788281c91cda3..c215423670e17 100644 --- a/web/packages/design/src/Icon/Icons/User.tsx +++ b/web/packages/design/src/Icon/Icons/User.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function User({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const User = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UserAdd.tsx b/web/packages/design/src/Icon/Icons/UserAdd.tsx index e3e5765d47ab9..f04141fd86832 100644 --- a/web/packages/design/src/Icon/Icons/UserAdd.tsx +++ b/web/packages/design/src/Icon/Icons/UserAdd.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function UserAdd({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UserAdd = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UserCircleGear.tsx b/web/packages/design/src/Icon/Icons/UserCircleGear.tsx index c17c37454bab3..16be34e4bb583 100644 --- a/web/packages/design/src/Icon/Icons/UserCircleGear.tsx +++ b/web/packages/design/src/Icon/Icons/UserCircleGear.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function UserCircleGear({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UserCircleGear = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UserFocus.tsx b/web/packages/design/src/Icon/Icons/UserFocus.tsx index de99762dcc5aa..f7e68aea23336 100644 --- a/web/packages/design/src/Icon/Icons/UserFocus.tsx +++ b/web/packages/design/src/Icon/Icons/UserFocus.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function UserFocus({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UserFocus = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -66,5 +69,5 @@ export function UserFocus({ size = 24, color, ...otherProps }: IconProps) { d="M8.25 10.5C8.25 8.42893 9.92893 6.75 12 6.75C14.0711 6.75 15.75 8.42893 15.75 10.5C15.75 11.5981 15.278 12.5859 14.5259 13.2717C14.6355 13.319 14.7439 13.3695 14.851 13.423C15.7362 13.8656 16.5062 14.5083 17.1 15.3C17.3486 15.6314 17.2814 16.1015 16.95 16.35C16.6187 16.5985 16.1486 16.5314 15.9 16.2C15.446 15.5945 14.8571 15.1031 14.1802 14.7647C13.5033 14.4262 12.7569 14.25 12 14.25C11.2432 14.25 10.4968 14.4262 9.81988 14.7647C9.14296 15.1031 8.55414 15.5945 8.10004 16.2C7.85152 16.5314 7.38141 16.5985 7.05004 16.35C6.71867 16.1015 6.65152 15.6314 6.90004 15.3C7.49386 14.5083 8.26385 13.8656 9.14906 13.423C9.25613 13.3695 9.36453 13.3191 9.47413 13.2718C8.72198 12.5859 8.25 11.5981 8.25 10.5ZM12 8.25C10.7574 8.25 9.75 9.25736 9.75 10.5C9.75 11.7426 10.7574 12.75 12 12.75C13.2426 12.75 14.25 11.7426 14.25 10.5C14.25 9.25736 13.2426 8.25 12 8.25Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UserIdBadge.tsx b/web/packages/design/src/Icon/Icons/UserIdBadge.tsx index f340c591ea015..a6e02ffbb5599 100644 --- a/web/packages/design/src/Icon/Icons/UserIdBadge.tsx +++ b/web/packages/design/src/Icon/Icons/UserIdBadge.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function UserIdBadge({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UserIdBadge = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UserList.tsx b/web/packages/design/src/Icon/Icons/UserList.tsx index 49b90b8042aea..0105fc61cbee9 100644 --- a/web/packages/design/src/Icon/Icons/UserList.tsx +++ b/web/packages/design/src/Icon/Icons/UserList.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function UserList({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UserList = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Users.tsx b/web/packages/design/src/Icon/Icons/Users.tsx index 93b0313cc4e6e..8afd0f8c86000 100644 --- a/web/packages/design/src/Icon/Icons/Users.tsx +++ b/web/packages/design/src/Icon/Icons/Users.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Users({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Users = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/UsersTriple.tsx b/web/packages/design/src/Icon/Icons/UsersTriple.tsx index 314f934eaddc1..d1a58ea40fd72 100644 --- a/web/packages/design/src/Icon/Icons/UsersTriple.tsx +++ b/web/packages/design/src/Icon/Icons/UsersTriple.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function UsersTriple({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const UsersTriple = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Vault.tsx b/web/packages/design/src/Icon/Icons/Vault.tsx index e4389813ca53c..197c030ed97b2 100644 --- a/web/packages/design/src/Icon/Icons/Vault.tsx +++ b/web/packages/design/src/Icon/Icons/Vault.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,14 +50,20 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Vault({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Vault = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/VideoGame.tsx b/web/packages/design/src/Icon/Icons/VideoGame.tsx index 921e5c6873525..6b97c2fab375e 100644 --- a/web/packages/design/src/Icon/Icons/VideoGame.tsx +++ b/web/packages/design/src/Icon/Icons/VideoGame.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function VideoGame({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const VideoGame = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function VideoGame({ size = 24, color, ...otherProps }: IconProps) { d="M7.86299 3.75L7.86465 3.75H16.125C17.6168 3.75 19.0475 4.34263 20.1024 5.39753C20.9281 6.22321 21.4706 7.27916 21.6674 8.41444L23.1986 16.2894C23.323 16.9962 23.2189 17.7242 22.9014 18.3678C22.5839 19.0114 22.0696 19.5371 21.433 19.8684C20.7965 20.1998 20.0709 20.3197 19.3616 20.2106C18.6523 20.1015 17.9962 19.7692 17.4887 19.2618C17.4774 19.2506 17.4666 19.2391 17.4561 19.2272L13.7321 15H10.2678L6.54398 19.227C6.5335 19.2389 6.52264 19.2505 6.51142 19.2617C6.00386 19.769 5.34779 20.1014 4.6385 20.2104C3.9292 20.3195 3.2036 20.1997 2.56705 19.8683C1.93051 19.5369 1.41616 19.0113 1.09869 18.3677C0.781228 17.7241 0.677175 16.9961 0.801643 16.2893L0.803948 16.2762L2.33656 8.39453C2.56636 7.09633 3.24488 5.92 4.25375 5.07105C5.26441 4.2206 6.54212 3.75293 7.86299 3.75ZM15.7312 15L18.5646 18.2163C18.8444 18.4895 19.2027 18.6685 19.5896 18.728C19.9836 18.7886 20.3868 18.722 20.7404 18.5379C21.094 18.3538 21.3798 18.0618 21.5561 17.7043C21.7316 17.3485 21.7897 16.9464 21.7222 16.5556L20.9032 12.3431C20.6776 12.7063 20.4096 13.0453 20.1024 13.3525C19.0475 14.4074 17.6168 15 16.125 15H15.7312ZM20.1839 8.63936C20.1856 8.65107 20.1876 8.6628 20.1898 8.67454L20.1917 8.68429C20.2302 8.91101 20.25 9.14199 20.25 9.375C20.25 10.469 19.8154 11.5182 19.0418 12.2918C18.2682 13.0654 17.219 13.5 16.125 13.5H9.92903C9.71363 13.5 9.50864 13.5926 9.36626 13.7542L5.4355 18.2161C5.15572 18.4893 4.79736 18.6684 4.4105 18.7279C4.01644 18.7885 3.61333 18.7219 3.25969 18.5378C2.90606 18.3537 2.62031 18.0617 2.44394 17.7041C2.26847 17.3484 2.21035 16.9462 2.27786 16.5555L3.81024 8.6744L3.81271 8.66106C3.98035 7.70704 4.47839 6.84244 5.21954 6.21877C5.96048 5.59527 6.89716 5.25234 7.86551 5.25H16.125C17.219 5.25 18.2682 
5.6846 19.0418 6.45819C19.6397 7.05611 20.0351 7.8187 20.1839 8.63936Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/VolumeUp.tsx b/web/packages/design/src/Icon/Icons/VolumeUp.tsx index e73821d0d469a..8b3d1b21618f8 100644 --- a/web/packages/design/src/Icon/Icons/VolumeUp.tsx +++ b/web/packages/design/src/Icon/Icons/VolumeUp.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function VolumeUp({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const VolumeUp = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/VpnKey.tsx b/web/packages/design/src/Icon/Icons/VpnKey.tsx index 6276a9ec4ee24..f6a2e10267bf7 100644 --- a/web/packages/design/src/Icon/Icons/VpnKey.tsx +++ b/web/packages/design/src/Icon/Icons/VpnKey.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function VpnKey({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const VpnKey = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Wand.tsx b/web/packages/design/src/Icon/Icons/Wand.tsx index 2cfa8a811f68f..8e1ba8801f0a6 100644 --- a/web/packages/design/src/Icon/Icons/Wand.tsx +++ b/web/packages/design/src/Icon/Icons/Wand.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,9 +50,15 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Wand({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Wand = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Warning.tsx b/web/packages/design/src/Icon/Icons/Warning.tsx index e31c760e89565..4d126599460f5 100644 --- a/web/packages/design/src/Icon/Icons/Warning.tsx +++ b/web/packages/design/src/Icon/Icons/Warning.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Warning({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Warning = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function Warning({ size = 24, color, ...otherProps }: IconProps) { d="M10.09 3.34287L1.88101 17.7086C1.04292 19.1753 2.10193 21.0001 3.79114 21.0001H20.2092C21.8984 21.0001 22.9574 19.1753 22.1193 17.7086L13.9103 3.34287C13.0657 1.86488 10.9346 1.86488 10.09 3.34287ZM3.18337 18.4528L11.3924 4.08708C11.6611 3.61681 12.3392 3.61681 12.6079 4.08708L20.8169 18.4528C21.0836 18.9195 20.7466 19.5001 20.2092 19.5001H3.79114C3.25367 19.5001 2.91671 18.9195 3.18337 18.4528Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/WarningCircle.tsx b/web/packages/design/src/Icon/Icons/WarningCircle.tsx index 3ffc90d7e30fc..82e68a0d017d5 100644 --- a/web/packages/design/src/Icon/Icons/WarningCircle.tsx +++ b/web/packages/design/src/Icon/Icons/WarningCircle.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function WarningCircle({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const WarningCircle = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( @@ -64,5 +67,5 @@ export function WarningCircle({ size = 24, color, ...otherProps }: IconProps) { d="M2.25 12C2.25 6.61522 6.61522 2.25 12 2.25C17.3848 2.25 21.75 6.61522 21.75 12C21.75 17.3848 17.3848 21.75 12 21.75C6.61522 21.75 2.25 17.3848 2.25 12ZM12 3.75C7.44365 3.75 3.75 7.44365 3.75 12C3.75 16.5563 7.44365 20.25 12 20.25C16.5563 20.25 20.25 16.5563 20.25 12C20.25 7.44365 16.5563 3.75 12 3.75Z" /> - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Wifi.tsx b/web/packages/design/src/Icon/Icons/Wifi.tsx index 0e026e1552250..7c068122e6108 100644 --- a/web/packages/design/src/Icon/Icons/Wifi.tsx +++ b/web/packages/design/src/Icon/Icons/Wifi.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,19 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Wifi({ size = 24, color, ...otherProps }: IconProps) { - return ( - +export const Wifi = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Windows.tsx b/web/packages/design/src/Icon/Icons/Windows.tsx index d8a11289af6f3..a32fd0fa48c04 100644 --- a/web/packages/design/src/Icon/Icons/Windows.tsx +++ b/web/packages/design/src/Icon/Icons/Windows.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function Windows({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Windows = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Wrench.tsx b/web/packages/design/src/Icon/Icons/Wrench.tsx index 094c3a998fbf6..0cf8329385a41 100644 --- a/web/packages/design/src/Icon/Icons/Wrench.tsx +++ b/web/packages/design/src/Icon/Icons/Wrench.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Wrench({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Wrench = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/Icons/Youtube.tsx b/web/packages/design/src/Icon/Icons/Youtube.tsx index a84aaa7b6c92b..106dbc3b48c89 100644 --- a/web/packages/design/src/Icon/Icons/Youtube.tsx +++ b/web/packages/design/src/Icon/Icons/Youtube.tsx @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,13 +50,14 @@ THIS FILE IS GENERATED. DO NOT EDIT. */ -export function Youtube({ size = 24, color, ...otherProps }: IconProps) { - return ( +export const Youtube = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( - ); -} + ) +); diff --git a/web/packages/design/src/Icon/script/IconTemplate.txt b/web/packages/design/src/Icon/script/IconTemplate.txt index e204eb99dfa0e..ff1f2a332129d 100644 --- a/web/packages/design/src/Icon/script/IconTemplate.txt +++ b/web/packages/design/src/Icon/script/IconTemplate.txt @@ -40,6 +40,8 @@ SOFTWARE. */ +import { forwardRef } from 'react'; + import { Icon, IconProps } from '../Icon'; /* @@ -48,10 +50,16 @@ THIS FILE IS GENERATED. DO NOT EDIT. 
*/ -export function {ICON_NAME}({ size = 24, color, ...otherProps}: IconProps) { - return ( - +export const {ICON_NAME} = forwardRef( + ({ size = 24, color, ...otherProps }, ref) => ( + {PATHS} - ); -} + ) +); diff --git a/web/packages/design/src/SVGIcon/SvgIcon.story.tsx b/web/packages/design/src/SVGIcon/SvgIcon.story.tsx index 4929004984873..d7c8d9f18cfcd 100644 --- a/web/packages/design/src/SVGIcon/SvgIcon.story.tsx +++ b/web/packages/design/src/SVGIcon/SvgIcon.story.tsx @@ -16,7 +16,7 @@ * along with this program. If not, see . */ -import { ReactNode } from 'react'; +import { Fragment, ReactNode } from 'react'; import { useTheme } from 'styled-components'; import { IconCircle } from 'design/Icon/IconCircle'; @@ -38,14 +38,14 @@ export const CustomIcons = () => { const size = 64; return ( - <> + - + ); })} diff --git a/web/packages/teleport/src/Navigation/RecentHistory.tsx b/web/packages/teleport/src/Navigation/RecentHistory.tsx index e46deb50089a2..e3a4fdc929efc 100644 --- a/web/packages/teleport/src/Navigation/RecentHistory.tsx +++ b/web/packages/teleport/src/Navigation/RecentHistory.tsx @@ -16,7 +16,7 @@ * along with this program. If not, see . 
*/ -import { useEffect, useRef, useState } from 'react'; +import { ReactNode, useEffect, useRef, useState } from 'react'; import { matchPath } from 'react-router'; import { NavLink } from 'react-router-dom'; import styled from 'styled-components'; @@ -44,7 +44,7 @@ type AnimatedItem = RecentHistoryItem & { function getIconForRoute( features: TeleportFeature[], route: string -): (props) => JSX.Element { +): (props) => ReactNode { const feature = features.find(feature => matchPath(route, { path: feature?.route?.path, @@ -166,7 +166,7 @@ function AnimatedHistoryItem({ onRemove, }: { item: AnimatedItem; - Icon: (props) => JSX.Element; + Icon: (props) => ReactNode; onRemove: () => void; }) { const [hovered, setHovered] = useState(false); diff --git a/web/packages/teleport/src/Navigation/SideNavigation/CategoryIcon.tsx b/web/packages/teleport/src/Navigation/SideNavigation/CategoryIcon.tsx index 8453fdcae2c45..1d1b4a24e673a 100644 --- a/web/packages/teleport/src/Navigation/SideNavigation/CategoryIcon.tsx +++ b/web/packages/teleport/src/Navigation/SideNavigation/CategoryIcon.tsx @@ -16,6 +16,8 @@ * along with this program. If not, see . 
*/ +import { ReactNode } from 'react'; + import * as Icons from 'design/Icon'; import { @@ -33,7 +35,7 @@ export function CategoryIcon({ size?: number; color?: string; }) { - let Icon: ({ size, color }) => JSX.Element; + let Icon: ({ size, color }) => ReactNode; switch (category) { case NavigationCategory.Resources: Icon = Icons.Server; diff --git a/web/packages/teleport/src/Navigation/SideNavigation/Navigation.tsx b/web/packages/teleport/src/Navigation/SideNavigation/Navigation.tsx index 763cd6b2490cf..f98f008f5c559 100644 --- a/web/packages/teleport/src/Navigation/SideNavigation/Navigation.tsx +++ b/web/packages/teleport/src/Navigation/SideNavigation/Navigation.tsx @@ -18,6 +18,7 @@ import type * as history from 'history'; import React, { + ReactNode, useCallback, useEffect, useMemo, @@ -87,7 +88,7 @@ export type NavigationSubsection = { title: string; route: string; exact: boolean; - icon: (props) => JSX.Element; + icon: (props) => ReactNode; parent?: TeleportFeature; searchableTags?: string[]; /** diff --git a/web/packages/teleport/src/Sessions/SessionList/SessionList.tsx b/web/packages/teleport/src/Sessions/SessionList/SessionList.tsx index 02c8c15f36286..cbb952b7be3a1 100644 --- a/web/packages/teleport/src/Sessions/SessionList/SessionList.tsx +++ b/web/packages/teleport/src/Sessions/SessionList/SessionList.tsx @@ -16,6 +16,7 @@ * along with this program. If not, see . 
*/ +import { ReactNode } from 'react'; import styled from 'styled-components'; import Table, { Cell } from 'design/DataTable'; @@ -99,7 +100,7 @@ export default function SessionList(props: Props) { } const kinds: { - [key in SessionKind]: { icon: (any) => JSX.Element; joinable: boolean }; + [key in SessionKind]: { icon: (any) => ReactNode; joinable: boolean }; } = { ssh: { icon: Icons.Cli, joinable: true }, k8s: { icon: Icons.Kubernetes, joinable: false }, diff --git a/web/packages/teleport/src/TopBar/DeviceTrustIcon.tsx b/web/packages/teleport/src/TopBar/DeviceTrustIcon.tsx index 4c0a25de81c9d..52bb0b92cbb87 100644 --- a/web/packages/teleport/src/TopBar/DeviceTrustIcon.tsx +++ b/web/packages/teleport/src/TopBar/DeviceTrustIcon.tsx @@ -16,6 +16,7 @@ * along with this program. If not, see . */ +import { ReactNode } from 'react'; import styled from 'styled-components'; import { Flex } from 'design'; @@ -54,7 +55,7 @@ const ShieldIcon = ({ color, ...props }: { - Icon: (props: IconProps) => JSX.Element; + Icon: (props: IconProps) => ReactNode; iconSize: number; color: string; }) => { diff --git a/web/packages/teleport/src/TopBar/TopBar.tsx b/web/packages/teleport/src/TopBar/TopBar.tsx index 696cdff2cbd42..a75c8c8608c9a 100644 --- a/web/packages/teleport/src/TopBar/TopBar.tsx +++ b/web/packages/teleport/src/TopBar/TopBar.tsx @@ -17,7 +17,7 @@ */ import type * as history from 'history'; -import React, { useCallback, useEffect, useState } from 'react'; +import React, { ReactNode, useCallback, useEffect, useState } from 'react'; import { matchPath, useHistory } from 'react-router'; import { Link } from 'react-router-dom'; import styled, { useTheme } from 'styled-components'; @@ -376,7 +376,7 @@ const MainNavItem = ({ to: string; size: number; name: string; - Icon: (props: { color: string; size: number }) => JSX.Element; + Icon: (props: { color: string; size: number }) => ReactNode; }) => { const { currentWidth } = useLayout(); const theme: Theme = useTheme(); diff --git 
a/web/packages/teleport/src/components/TabIcon/TabIcon.tsx b/web/packages/teleport/src/components/TabIcon/TabIcon.tsx index 1790a19bcb2ca..8b22da1727cb7 100644 --- a/web/packages/teleport/src/components/TabIcon/TabIcon.tsx +++ b/web/packages/teleport/src/components/TabIcon/TabIcon.tsx @@ -16,6 +16,7 @@ * along with this program. If not, see . */ +import { ReactNode } from 'react'; import styled from 'styled-components'; import { H3 } from 'design'; @@ -38,7 +39,7 @@ type Props = { active: boolean; onClick(): void; title: string; - Icon: (any) => JSX.Element; + Icon: (any) => ReactNode; }; const StyledTab = styled(H3)<{ active?: boolean }>` diff --git a/web/packages/teleport/src/types.ts b/web/packages/teleport/src/types.ts index 356dc95027ee1..69e909cd5011c 100644 --- a/web/packages/teleport/src/types.ts +++ b/web/packages/teleport/src/types.ts @@ -16,7 +16,7 @@ * along with this program. If not, see . */ -import React from 'react'; +import React, { ReactNode } from 'react'; import { UserPreferences } from 'gen-proto-ts/teleport/lib/teleterm/v1/service_pb'; @@ -36,7 +36,7 @@ export interface Context { export interface TeleportFeatureNavigationItem { title: NavTitle; - icon: (props) => JSX.Element; + icon: (props) => ReactNode; exact?: boolean; getLink?(clusterId: string): string; isExternalLink?: boolean; From b0b56b1b2905ae4d2871e57f6a2cb6329e3c1272 Mon Sep 17 00:00:00 2001 From: Bartosz Leper Date: Thu, 9 Jan 2025 13:38:55 +0100 Subject: [PATCH 23/45] Cleanup: Remove deprecated tooltip component aliases (#49551) * Remove deprecated tooltip component aliases * Replace two more references to the old alias --- .../shared/components/ToolTip/index.ts | 25 ------------------- web/packages/teleport/src/Roles/Roles.tsx | 2 +- web/packages/teleport/src/Users/Users.tsx | 2 +- 3 files changed, 2 insertions(+), 27 deletions(-) delete mode 100644 web/packages/shared/components/ToolTip/index.ts diff --git a/web/packages/shared/components/ToolTip/index.ts 
b/web/packages/shared/components/ToolTip/index.ts deleted file mode 100644 index f1be185cb4ae6..0000000000000 --- a/web/packages/shared/components/ToolTip/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -export { - /** @deprecated Use `TooltipInfo` from `design/Tooltip` */ - IconTooltip as ToolTipInfo, - - /** @deprecated Use `HoverTooltip` from `design/Tooltip` */ - HoverTooltip, -} from 'design/Tooltip'; diff --git a/web/packages/teleport/src/Roles/Roles.tsx b/web/packages/teleport/src/Roles/Roles.tsx index 2698d115b4d13..fd138d323d91f 100644 --- a/web/packages/teleport/src/Roles/Roles.tsx +++ b/web/packages/teleport/src/Roles/Roles.tsx @@ -21,13 +21,13 @@ import styled from 'styled-components'; import { Alert, Box, Button, Flex, H3, Link } from 'design'; import { P } from 'design/Text/Text'; +import { HoverTooltip } from 'design/Tooltip'; import { MissingPermissionsTooltip } from 'shared/components/MissingPermissionsTooltip'; import { Notification, NotificationItem, NotificationSeverity, } from 'shared/components/Notification'; -import { HoverTooltip } from 'shared/components/ToolTip'; import { useServerSidePagination } from 'teleport/components/hooks'; import { diff --git a/web/packages/teleport/src/Users/Users.tsx b/web/packages/teleport/src/Users/Users.tsx index 
7bd73b2d135d0..3c2ab57741a26 100644 --- a/web/packages/teleport/src/Users/Users.tsx +++ b/web/packages/teleport/src/Users/Users.tsx @@ -19,7 +19,7 @@ import React from 'react'; import { Alert, Box, Button, Flex, Indicator, Link, Text } from 'design'; -import { HoverTooltip } from 'shared/components/ToolTip'; +import { HoverTooltip } from 'design/Tooltip'; import { FeatureBox, From 9dc860d5fea70535323b2029822a06a34c63a2e9 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Thu, 9 Jan 2025 09:18:57 -0500 Subject: [PATCH 24/45] Fix proto resource 153 marshalling for autoupdate_* resources (#50688) * Fix proto resource 153 marshalling * Update tool/tctl/common/collection_test.go Co-authored-by: Alan Parra * Update tool/tctl/common/collection_test.go Co-authored-by: Alan Parra * Address feedback - Change from Resource153AdapterV2 to ProtoResource153Adapter - fix test failures and unmarshal proto resources properly - add a failing round-trip proto 153 test case - bonus: fix the table tesst reosurce create that did not support running a single row * Apply suggestions from code review Co-authored-by: Alan Parra * lint --------- Co-authored-by: Alan Parra --- api/types/resource_153.go | 39 +++++++++++ tool/tctl/common/collection.go | 6 +- tool/tctl/common/collection_test.go | 69 +++++++++++++++++++ tool/tctl/common/helpers_test.go | 9 +++ tool/tctl/common/resource_command_test.go | 82 ++++++++++++++++------- 5 files changed, 177 insertions(+), 28 deletions(-) diff --git a/api/types/resource_153.go b/api/types/resource_153.go index a09c39451cd3d..dbe69a1108466 100644 --- a/api/types/resource_153.go +++ b/api/types/resource_153.go @@ -18,6 +18,8 @@ import ( "encoding/json" "time" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" headerv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/header/v1" @@ -124,6 +126,10 @@ func (r *legacyToResource153Adapter) GetVersion() string { // 
[Resource] type. Implements [ResourceWithLabels] and CloneResource (where the) // wrapped resource supports cloning). // +// Resources153 implemented by proto-generated structs should use ProtoResource153ToLegacy +// instead as it will ensure the protobuf message is properly marshaled to JSON +// with protojson. +// // Note that CheckAndSetDefaults is a noop for the returned resource and // SetSubKind is not implemented and panics on use. func Resource153ToLegacy(r Resource153) Resource { @@ -348,3 +354,36 @@ func (r *resource153ToUnifiedResourceAdapter) CloneResource() ResourceWithLabels clone := r.inner.(ClonableResource153).CloneResource() return Resource153ToUnifiedResource(clone) } + +// ProtoResource153 is a Resource153 implemented by a protobuf-generated struct. +type ProtoResource153 interface { + Resource153 + proto.Message +} + +type protoResource153ToLegacyAdapter struct { + inner ProtoResource153 + resource153ToLegacyAdapter +} + +// MarshalJSON adds support for marshaling the wrapped resource (instead of +// marshaling the adapter itself). +func (r *protoResource153ToLegacyAdapter) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseProtoNames: true, + }.Marshal(r.inner) +} + +// ProtoResource153ToLegacy transforms an RFD 153 style resource implemented by +// a proto-generated struct into a legacy [Resource] type. Implements +// [ResourceWithLabels] and CloneResource (where the wrapped resource supports +// cloning). +// +// Note that CheckAndSetDefaults is a noop for the returned resource and +// SetSubKind is not implemented and panics on use. 
+func ProtoResource153ToLegacy(r ProtoResource153) Resource { + return &protoResource153ToLegacyAdapter{ + r, + resource153ToLegacyAdapter{r}, + } +} diff --git a/tool/tctl/common/collection.go b/tool/tctl/common/collection.go index c31d2a25ed0bf..c1ea21addc2b6 100644 --- a/tool/tctl/common/collection.go +++ b/tool/tctl/common/collection.go @@ -1908,7 +1908,7 @@ type autoUpdateConfigCollection struct { } func (c *autoUpdateConfigCollection) resources() []types.Resource { - return []types.Resource{types.Resource153ToLegacy(c.config)} + return []types.Resource{types.ProtoResource153ToLegacy(c.config)} } func (c *autoUpdateConfigCollection) writeText(w io.Writer, verbose bool) error { @@ -1926,7 +1926,7 @@ type autoUpdateVersionCollection struct { } func (c *autoUpdateVersionCollection) resources() []types.Resource { - return []types.Resource{types.Resource153ToLegacy(c.version)} + return []types.Resource{types.ProtoResource153ToLegacy(c.version)} } func (c *autoUpdateVersionCollection) writeText(w io.Writer, verbose bool) error { @@ -1944,7 +1944,7 @@ type autoUpdateAgentRolloutCollection struct { } func (c *autoUpdateAgentRolloutCollection) resources() []types.Resource { - return []types.Resource{types.Resource153ToLegacy(c.rollout)} + return []types.Resource{types.ProtoResource153ToLegacy(c.rollout)} } func (c *autoUpdateAgentRolloutCollection) writeText(w io.Writer, verbose bool) error { diff --git a/tool/tctl/common/collection_test.go b/tool/tctl/common/collection_test.go index 166c5f6901599..f0679b9a65581 100644 --- a/tool/tctl/common/collection_test.go +++ b/tool/tctl/common/collection_test.go @@ -27,13 +27,19 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/uuid" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/durationpb" + kyaml "k8s.io/apimachinery/pkg/util/yaml" "github.com/gravitational/teleport/api" + autoupdatev1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1" dbobjectv1 
"github.com/gravitational/teleport/api/gen/proto/go/teleport/dbobject/v1" dbobjectimportrulev1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/dbobjectimportrule/v1" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/autoupdate" "github.com/gravitational/teleport/api/types/label" "github.com/gravitational/teleport/lib/asciitable" + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/srv/db/common/databaseobject" "github.com/gravitational/teleport/lib/srv/db/common/databaseobjectimportrule" "github.com/gravitational/teleport/tool/common" @@ -431,3 +437,66 @@ func makeTestLabels(extraStaticLabels map[string]string) map[string]string { maps.Copy(labels, extraStaticLabels) return labels } + +// autoUpdateConfigBrokenCollection is an intentionally broken version of the +// autoUpdateConfigCollection that is not marshaling resources properly because +// it's doing json marshaling instead of protojson marshaling. +type autoUpdateConfigBrokenCollection struct { + autoUpdateConfigCollection +} + +func (c *autoUpdateConfigBrokenCollection) resources() []types.Resource { + // We use Resource153ToLegacy instead of ProtoResource153ToLegacy. + return []types.Resource{types.Resource153ToLegacy(c.config)} +} + +// This test makes sure we marshal and unmarshal proto-based Resource153 properly. +// We had a bug where types.Resource153 implemented by protobuf structs were not +// marshaled properly (they should be marshaled using protojson). This test +// checks we can do a round-trip with one of those proto-struct resource. +func TestRoundTripProtoResource153(t *testing.T) { + // Test setup: generate fixture. 
+ initial, err := autoupdate.NewAutoUpdateConfig(&autoupdatev1pb.AutoUpdateConfigSpec{ + Agents: &autoupdatev1pb.AutoUpdateConfigSpecAgents{ + Mode: autoupdate.AgentsUpdateModeEnabled, + Strategy: autoupdate.AgentsStrategyTimeBased, + MaintenanceWindowDuration: durationpb.New(1 * time.Hour), + Schedules: &autoupdatev1pb.AgentAutoUpdateSchedules{ + Regular: []*autoupdatev1pb.AgentAutoUpdateGroup{ + { + Name: "group1", + Days: []string{types.Wildcard}, + }, + }, + }, + }, + }) + require.NoError(t, err) + + // Test execution: dump the resource into a YAML manifest. + collection := &autoUpdateConfigCollection{config: initial} + buf := &bytes.Buffer{} + require.NoError(t, writeYAML(collection, buf)) + + // Test execution: load the YAML manifest back. + decoder := kyaml.NewYAMLOrJSONDecoder(buf, defaults.LookaheadBufSize) + var raw services.UnknownResource + require.NoError(t, decoder.Decode(&raw)) + result, err := services.UnmarshalProtoResource[*autoupdatev1pb.AutoUpdateConfig](raw.Raw) + require.NoError(t, err) + + // Test validation: check that the loaded content matches what we had before. + require.Equal(t, result, initial) + + // Test execution: now dump the resource into a YAML manifest with a + // collection using types.Resource153ToLegacy instead of types.ProtoResource153ToLegacy + brokenCollection := &autoUpdateConfigBrokenCollection{autoUpdateConfigCollection{initial}} + buf = &bytes.Buffer{} + require.NoError(t, writeYAML(brokenCollection, buf)) + + // Test execution: load the YAML manifest back and see that we can't unmarshal it. 
+ decoder = kyaml.NewYAMLOrJSONDecoder(buf, defaults.LookaheadBufSize) + require.NoError(t, decoder.Decode(&raw)) + _, err = services.UnmarshalProtoResource[*autoupdatev1pb.AutoUpdateConfig](raw.Raw) + require.Error(t, err) +} diff --git a/tool/tctl/common/helpers_test.go b/tool/tctl/common/helpers_test.go index 0cf773852c96f..b235a40e8b5e2 100644 --- a/tool/tctl/common/helpers_test.go +++ b/tool/tctl/common/helpers_test.go @@ -35,6 +35,7 @@ import ( "github.com/jonboulle/clockwork" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" + kyaml "k8s.io/apimachinery/pkg/util/yaml" "github.com/gravitational/teleport/api/breaker" apidefaults "github.com/gravitational/teleport/api/defaults" @@ -43,6 +44,7 @@ import ( "github.com/gravitational/teleport/lib/config" "github.com/gravitational/teleport/lib/service" "github.com/gravitational/teleport/lib/service/servicecfg" + "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/utils" commonclient "github.com/gravitational/teleport/tool/tctl/common/client" tctlcfg "github.com/gravitational/teleport/tool/tctl/common/config" @@ -153,6 +155,13 @@ func mustDecodeJSON[T any](t *testing.T, r io.Reader) T { return out } +func mustTranscodeYAMLToJSON(t *testing.T, r io.Reader) []byte { + decoder := kyaml.NewYAMLToJSONDecoder(r) + var resource services.UnknownResource + require.NoError(t, decoder.Decode(&resource)) + return resource.Raw +} + func mustDecodeYAMLDocuments[T any](t *testing.T, r io.Reader, out *[]T) { t.Helper() decoder := yaml.NewDecoder(r) diff --git a/tool/tctl/common/resource_command_test.go b/tool/tctl/common/resource_command_test.go index 61b2c2650f53a..ff280c07a6f08 100644 --- a/tool/tctl/common/resource_command_test.go +++ b/tool/tctl/common/resource_command_test.go @@ -36,6 +36,7 @@ import ( "github.com/jonboulle/clockwork" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" 
"google.golang.org/protobuf/testing/protocmp" "k8s.io/apimachinery/pkg/util/yaml" @@ -1371,17 +1372,29 @@ func TestCreateResources(t *testing.T) { process := testenv.MakeTestServer(t, testenv.WithLogger(utils.NewSlogLoggerForTests())) rootClient := testenv.MakeDefaultAuthClient(t, process) + // tctlGetAllValidations allows tests to register post-test validations to validate + // that their resource is present in "tctl get all" output. + // This allows running test rows instead of the whole test table. + var tctlGetAllValidations []func(t *testing.T, out string) + tests := []struct { - kind string - create func(t *testing.T, clt *authclient.Client) + kind string + create func(t *testing.T, clt *authclient.Client) + getAllCheck func(t *testing.T, out string) }{ { kind: types.KindGithubConnector, create: testCreateGithubConnector, + getAllCheck: func(t *testing.T, s string) { + assert.Contains(t, s, "kind: github") + }, }, { kind: types.KindRole, create: testCreateRole, + getAllCheck: func(t *testing.T, s string) { + assert.Contains(t, s, "kind: role") + }, }, { kind: types.KindServerInfo, @@ -1390,6 +1403,9 @@ func TestCreateResources(t *testing.T) { { kind: types.KindUser, create: testCreateUser, + getAllCheck: func(t *testing.T, s string) { + assert.Contains(t, s, "kind: user") + }, }, { kind: types.KindDatabaseObjectImportRule, @@ -1402,10 +1418,16 @@ func TestCreateResources(t *testing.T) { { kind: types.KindClusterNetworkingConfig, create: testCreateClusterNetworkingConfig, + getAllCheck: func(t *testing.T, s string) { + assert.Contains(t, s, "kind: cluster_networking_config") + }, }, { kind: types.KindClusterAuthPreference, create: testCreateAuthPreference, + getAllCheck: func(t *testing.T, s string) { + assert.Contains(t, s, "kind: cluster_auth_preference") + }, }, { kind: types.KindSessionRecordingConfig, @@ -1440,6 +1462,9 @@ func TestCreateResources(t *testing.T) { for _, test := range tests { t.Run(test.kind, func(t *testing.T) { test.create(t, rootClient) 
+ if test.getAllCheck != nil { + tctlGetAllValidations = append(tctlGetAllValidations, test.getAllCheck) + } }) } @@ -1447,12 +1472,9 @@ func TestCreateResources(t *testing.T) { out, err := runResourceCommand(t, rootClient, []string{"get", "all"}) require.NoError(t, err) s := out.String() - require.NotEmpty(t, s) - assert.Contains(t, s, "kind: github") - assert.Contains(t, s, "kind: cluster_auth_preference") - assert.Contains(t, s, "kind: cluster_networking_config") - assert.Contains(t, s, "kind: user") - assert.Contains(t, s, "kind: role") + for _, validateGetAll := range tctlGetAllValidations { + validateGetAll(t, s) + } } func testCreateGithubConnector(t *testing.T, clt *authclient.Client) { @@ -2326,18 +2348,21 @@ version: v1 _, err = runResourceCommand(t, clt, []string{"create", resourceYAMLPath}) require.NoError(t, err) - // Get the resource buf, err := runResourceCommand(t, clt, []string{"get", types.KindAutoUpdateConfig, "--format=json"}) require.NoError(t, err) - resources := mustDecodeJSON[[]*autoupdate.AutoUpdateConfig](t, buf) - require.Len(t, resources, 1) + + rawResources := mustDecodeJSON[[]services.UnknownResource](t, buf) + require.Len(t, rawResources, 1) + var resource autoupdate.AutoUpdateConfig + require.NoError(t, protojson.Unmarshal(rawResources[0].Raw, &resource)) var expected autoupdate.AutoUpdateConfig - require.NoError(t, yaml.Unmarshal([]byte(resourceYAML), &expected)) + expectedJSON := mustTranscodeYAMLToJSON(t, bytes.NewReader([]byte(resourceYAML))) + require.NoError(t, protojson.Unmarshal(expectedJSON, &expected)) require.Empty(t, cmp.Diff( - []*autoupdate.AutoUpdateConfig{&expected}, - resources, + &expected, + &resource, protocmp.IgnoreFields(&headerv1.Metadata{}, "revision"), protocmp.Transform(), )) @@ -2368,18 +2393,21 @@ version: v1 _, err = runResourceCommand(t, clt, []string{"create", resourceYAMLPath}) require.NoError(t, err) - // Get the resource buf, err := runResourceCommand(t, clt, []string{"get", 
types.KindAutoUpdateVersion, "--format=json"}) require.NoError(t, err) - resources := mustDecodeJSON[[]*autoupdate.AutoUpdateVersion](t, buf) - require.Len(t, resources, 1) + + rawResources := mustDecodeJSON[[]services.UnknownResource](t, buf) + require.Len(t, rawResources, 1) + var resource autoupdate.AutoUpdateVersion + require.NoError(t, protojson.Unmarshal(rawResources[0].Raw, &resource)) var expected autoupdate.AutoUpdateVersion - require.NoError(t, yaml.Unmarshal([]byte(resourceYAML), &expected)) + expectedJSON := mustTranscodeYAMLToJSON(t, bytes.NewReader([]byte(resourceYAML))) + require.NoError(t, protojson.Unmarshal(expectedJSON, &expected)) require.Empty(t, cmp.Diff( - []*autoupdate.AutoUpdateVersion{&expected}, - resources, + &expected, + &resource, protocmp.IgnoreFields(&headerv1.Metadata{}, "revision"), protocmp.Transform(), )) @@ -2423,15 +2451,19 @@ version: v1 // Get the resource buf, err := runResourceCommand(t, clt, []string{"get", types.KindAutoUpdateAgentRollout, "--format=json"}) require.NoError(t, err) - resources := mustDecodeJSON[[]*autoupdate.AutoUpdateAgentRollout](t, buf) - require.Len(t, resources, 1) + + rawResources := mustDecodeJSON[[]services.UnknownResource](t, buf) + require.Len(t, rawResources, 1) + var resource autoupdate.AutoUpdateAgentRollout + require.NoError(t, protojson.Unmarshal(rawResources[0].Raw, &resource)) var expected autoupdate.AutoUpdateAgentRollout - require.NoError(t, yaml.Unmarshal([]byte(resourceYAML), &expected)) + expectedJSON := mustTranscodeYAMLToJSON(t, bytes.NewReader([]byte(resourceYAML))) + require.NoError(t, protojson.Unmarshal(expectedJSON, &expected)) require.Empty(t, cmp.Diff( - []*autoupdate.AutoUpdateAgentRollout{&expected}, - resources, + &expected, + &resource, protocmp.IgnoreFields(&headerv1.Metadata{}, "revision"), protocmp.Transform(), )) From c6e442f50a4bb67bc2c165939ebb1d7d6b0e6683 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Skrz=C4=99tnicki?= Date: Thu, 9 Jan 2025 16:17:06 +0100 
Subject: [PATCH 25/45] Make Teleport Connect use TCP connection mode for Oracle (#50903) * Make Teleport Connect use TCP connection mode for Oracle * Add comments to params --- lib/teleterm/cmd/db.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/teleterm/cmd/db.go b/lib/teleterm/cmd/db.go index c18accc048b66..144d386224c38 100644 --- a/lib/teleterm/cmd/db.go +++ b/lib/teleterm/cmd/db.go @@ -71,6 +71,7 @@ func newDBCLICommandWithExecer(ctx context.Context, cluster *clusters.Cluster, g dbcmd.WithNoTLS(), dbcmd.WithTolerateMissingCLIClient(), dbcmd.WithExecer(execer), + dbcmd.WithOracleOpts(true /* can use TCP */, true /* has TCP servers */), dbcmd.WithGetDatabaseFunc(func(ctx context.Context, _ *client.TeleportClient, _ string) (types.Database, error) { getDatabaseOnce.Do(func() { database, getDatabaseError = cluster.GetDatabase(ctx, authClient, gateway.TargetURI()) From 588d02402ac173fcd38b8dae4532809165c1c7fd Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Thu, 9 Jan 2025 15:40:56 +0000 Subject: [PATCH 26/45] Add debugging steps for DiscoverEKS User Task issues (#50377) This PR adds description to all known discover EKS issues. It also adds URLs that the user can follow to fix or further debug the issue. The URLs are per-cluster, and they usually link to Amazon EKS Cluster page or one of its sub pages. 
--- lib/usertasks/descriptions.go | 19 +- .../descriptions/eks-agent-not-connecting.md | 8 + .../eks-authentication-mode-unsupported.md | 3 + .../descriptions/eks-cluster-unreachable.md | 5 + .../eks-missing-endpoint-public-access.md | 3 + .../descriptions/eks-status-not-active.md | 3 + lib/usertasks/descriptions_test.go | 3 + lib/usertasks/urls.go | 174 ++++++++++++++++++ lib/usertasks/urls_test.go | 151 +++++++++++++++ lib/web/ui/usertask.go | 23 ++- 10 files changed, 385 insertions(+), 7 deletions(-) create mode 100644 lib/usertasks/descriptions/eks-agent-not-connecting.md create mode 100644 lib/usertasks/descriptions/eks-authentication-mode-unsupported.md create mode 100644 lib/usertasks/descriptions/eks-cluster-unreachable.md create mode 100644 lib/usertasks/descriptions/eks-missing-endpoint-public-access.md create mode 100644 lib/usertasks/descriptions/eks-status-not-active.md create mode 100644 lib/usertasks/urls.go create mode 100644 lib/usertasks/urls_test.go diff --git a/lib/usertasks/descriptions.go b/lib/usertasks/descriptions.go index eb1655fee5ea7..3068e1d02b023 100644 --- a/lib/usertasks/descriptions.go +++ b/lib/usertasks/descriptions.go @@ -26,10 +26,7 @@ import ( //go:embed descriptions/*.md var descriptionsFS embed.FS -// DescriptionForDiscoverEC2Issue returns the description of the issue and fixing steps. -// The returned string contains a markdown document. -// If issue type is not recognized or doesn't have a specific description, them an empty string is returned. -func DescriptionForDiscoverEC2Issue(issueType string) string { +func loadIssueDescription(issueType string) string { filename := fmt.Sprintf("descriptions/%s.md", issueType) bs, err := descriptionsFS.ReadFile(filename) if err != nil { @@ -37,3 +34,17 @@ func DescriptionForDiscoverEC2Issue(issueType string) string { } return string(bs) } + +// DescriptionForDiscoverEC2Issue returns the description of the issue and fixing steps. +// The returned string contains a markdown document. 
+// If issue type is not recognized or doesn't have a specific description, them an empty string is returned. +func DescriptionForDiscoverEC2Issue(issueType string) string { + return loadIssueDescription(issueType) +} + +// DescriptionForDiscoverEKSIssue returns the description of the issue and fixing steps. +// The returned string contains a markdown document. +// If issue type is not recognized or doesn't have a specific description, them an empty string is returned. +func DescriptionForDiscoverEKSIssue(issueType string) string { + return loadIssueDescription(issueType) +} diff --git a/lib/usertasks/descriptions/eks-agent-not-connecting.md b/lib/usertasks/descriptions/eks-agent-not-connecting.md new file mode 100644 index 0000000000000..60d2a0b2c02a9 --- /dev/null +++ b/lib/usertasks/descriptions/eks-agent-not-connecting.md @@ -0,0 +1,8 @@ +The process of automatically enrolling EKS Clusters into Teleport, starts by installing the [`teleport-kube-agent`](https://goteleport.com/docs/reference/helm-reference/teleport-kube-agent/) to the cluster. + +If the installation is successful, the EKS Cluster will appear in your Resources list. + +However, the following EKS Clusters did not automatically enrolled. +This usually happens when the installation is taking too long or there was an error preventing the HELM chart installation. + +Open the Teleport Agent to get more information. \ No newline at end of file diff --git a/lib/usertasks/descriptions/eks-authentication-mode-unsupported.md b/lib/usertasks/descriptions/eks-authentication-mode-unsupported.md new file mode 100644 index 0000000000000..c6d3f55e569c8 --- /dev/null +++ b/lib/usertasks/descriptions/eks-authentication-mode-unsupported.md @@ -0,0 +1,3 @@ +Teleport uses the Amazon EKS API to install the Teleport Kubernetes Agent. + +Please enable API (or API and Config Map) authentication mode in the following EKS Clusters so that they can be automatically enrolled. 
\ No newline at end of file diff --git a/lib/usertasks/descriptions/eks-cluster-unreachable.md b/lib/usertasks/descriptions/eks-cluster-unreachable.md new file mode 100644 index 0000000000000..f1cf31beed18f --- /dev/null +++ b/lib/usertasks/descriptions/eks-cluster-unreachable.md @@ -0,0 +1,5 @@ +The EKS Cluster must be accessible from the Teleport Auth Service in order for Teleport to deploy the Teleport Kubernetes Agent. + +The following EKS Clusters couldn't be accessed. + +Ensure their network endpoint access configuration allows access from Teleport. \ No newline at end of file diff --git a/lib/usertasks/descriptions/eks-missing-endpoint-public-access.md b/lib/usertasks/descriptions/eks-missing-endpoint-public-access.md new file mode 100644 index 0000000000000..d1e2713c9b75c --- /dev/null +++ b/lib/usertasks/descriptions/eks-missing-endpoint-public-access.md @@ -0,0 +1,3 @@ +The EKS Cluster must be publicly accessible in order for Teleport to deploy the Teleport Kubernetes Agent. + +You can enable the public endpoint by accessing the Manage Endpoint Access. \ No newline at end of file diff --git a/lib/usertasks/descriptions/eks-status-not-active.md b/lib/usertasks/descriptions/eks-status-not-active.md new file mode 100644 index 0000000000000..831f22ba99f15 --- /dev/null +++ b/lib/usertasks/descriptions/eks-status-not-active.md @@ -0,0 +1,3 @@ +Only EKS Clusters whose status is active can be automatically enrolled into teleport. + +The following are not active. 
\ No newline at end of file diff --git a/lib/usertasks/descriptions_test.go b/lib/usertasks/descriptions_test.go index 30a358ae1ea9d..6d0d2f4cd371f 100644 --- a/lib/usertasks/descriptions_test.go +++ b/lib/usertasks/descriptions_test.go @@ -30,4 +30,7 @@ func TestAllDescriptions(t *testing.T) { for _, issueType := range usertasksapi.DiscoverEC2IssueTypes { require.NotEmpty(t, DescriptionForDiscoverEC2Issue(issueType), "issue type %q is missing descriptions/%s.md file", issueType, issueType) } + for _, issueType := range usertasksapi.DiscoverEKSIssueTypes { + require.NotEmpty(t, DescriptionForDiscoverEKSIssue(issueType), "issue type %q is missing descriptions/%s.md file", issueType, issueType) + } } diff --git a/lib/usertasks/urls.go b/lib/usertasks/urls.go new file mode 100644 index 0000000000000..1f95960af0cb2 --- /dev/null +++ b/lib/usertasks/urls.go @@ -0,0 +1,174 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package usertasks + +import ( + "net/url" + "path" + + usertasksv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/usertasks/v1" + usertasksapi "github.com/gravitational/teleport/api/types/usertasks" +) + +// UserTaskDiscoverEKSWithURLs contains the clusters that failed to auto-enroll into the cluster. 
+type UserTaskDiscoverEKSWithURLs struct { + *usertasksv1.DiscoverEKS + // Clusters maps a cluster name to the result of enrolling that cluster into teleport. + Clusters map[string]*DiscoverEKSClusterWithURLs `json:"clusters,omitempty"` +} + +// DiscoverEKSClusterWithURLs contains the result of enrolling an AWS EKS Cluster. +type DiscoverEKSClusterWithURLs struct { + *usertasksv1.DiscoverEKSCluster + + // ResourceURL is the Amazon Web Console URL to access this EKS Cluster. + // Always present. + // Format: https://console.aws.amazon.com/eks/home?region=#/clusters/ + ResourceURL string `json:"resourceUrl,omitempty"` + + // OpenTeleportAgentURL is the URL to open the Teleport Agent StatefulSet in Amazon EKS Web Console. + // Present when issue is of type eks-agent-not-connecting. + // Format: https://console.aws.amazon.com/eks/home?region=#/clusters//statefulsets/teleport-kube-agent?namespace=teleport-agent + OpenTeleportAgentURL string `json:"openTeleportAgentUrl,omitempty"` + + // ManageAccessURL is the URL to open the EKS in Amazon Web Console, in the Manage Access page. + // Present when issue is of type eks-authentication-mode-unsupported. + // Format: https://console.aws.amazon.com/eks/home?region=#/clusters//manage-access + ManageAccessURL string `json:"manageAccessUrl,omitempty"` + + // ManageEndpointAccessURL is the URL to open the EKS in Amazon Web Console, in the Manage Endpoint Access page. + // Present when issue is of type eks-cluster-unreachable and eks-missing-endpoint-public-access. + // Format: https://console.aws.amazon.com/eks/home?region=#/clusters//manage-endpoint-access + ManageEndpointAccessURL string `json:"manageEndpointAccessUrl,omitempty"` + + // ManageClusterURL is the URL to open the EKS Cluster in Amazon Web Console. + // Present when issue is of type eks-status-not-active. 
+ // Format: https://console.aws.amazon.com/eks/home?region=#/clusters/ + ManageClusterURL string `json:"manageClusterUrl,omitempty"` +} + +func withEKSClusterIssueURL(metadata *usertasksv1.UserTask, cluster *usertasksv1.DiscoverEKSCluster) *DiscoverEKSClusterWithURLs { + ret := &DiscoverEKSClusterWithURLs{ + DiscoverEKSCluster: cluster, + } + clusterBaseURL := url.URL{ + Scheme: "https", + Host: "console.aws.amazon.com", + Path: path.Join("eks", "home"), + Fragment: "/clusters/" + cluster.GetName(), + RawQuery: url.Values{ + "region": []string{metadata.Spec.DiscoverEks.GetRegion()}, + }.Encode(), + } + + ret.ResourceURL = clusterBaseURL.String() + + switch metadata.Spec.IssueType { + case usertasksapi.AutoDiscoverEKSIssueAgentNotConnecting: + clusterBaseURL.Fragment = clusterBaseURL.Fragment + "/statefulsets/teleport-kube-agent?namespace=teleport-agent" + ret.OpenTeleportAgentURL = clusterBaseURL.String() + + case usertasksapi.AutoDiscoverEKSIssueAuthenticationModeUnsupported: + clusterBaseURL.Fragment = clusterBaseURL.Fragment + "/manage-access" + ret.ManageAccessURL = clusterBaseURL.String() + + case usertasksapi.AutoDiscoverEKSIssueClusterUnreachable, usertasksapi.AutoDiscoverEKSIssueMissingEndpoingPublicAccess: + clusterBaseURL.Fragment = clusterBaseURL.Fragment + "/manage-endpoint-access" + ret.ManageEndpointAccessURL = clusterBaseURL.String() + + case usertasksapi.AutoDiscoverEKSIssueStatusNotActive: + ret.ManageClusterURL = clusterBaseURL.String() + } + + return ret +} + +// EKSClustersWithURLs takes a UserTask and enriches the cluster list with URLs. +// Currently, the following URLs will be added: +// - ResourceURL: a link to open the instance in Amazon Web Console. +// The following URLs might be added depending on the issue type: +// - OpenTeleportAgentURL: links directly to the statefulset created during the helm installation +// - ManageAccessURL: links to the Manage Access screen in the Amazon EKS Web Console, for the current EKS Cluster. 
+// - ManageEndpointAccessURL: links to the Manage Endpoint Access screen in the Amazon EKS Web Console, for the current EKS Cluster. +// - ManageClusterURL: links to the EKS Cluster. +func EKSClustersWithURLs(ut *usertasksv1.UserTask) *UserTaskDiscoverEKSWithURLs { + clusters := ut.Spec.GetDiscoverEks().GetClusters() + clustersWithURLs := make(map[string]*DiscoverEKSClusterWithURLs, len(clusters)) + + for clusterName, cluster := range clusters { + clustersWithURLs[clusterName] = withEKSClusterIssueURL(ut, cluster) + } + + return &UserTaskDiscoverEKSWithURLs{ + DiscoverEKS: ut.Spec.GetDiscoverEks(), + Clusters: clustersWithURLs, + } +} + +// UserTaskDiscoverEC2WithURLs contains the instances that failed to auto-enroll into the cluster. +type UserTaskDiscoverEC2WithURLs struct { + *usertasksv1.DiscoverEC2 + // Instances maps the instance ID name to the result of enrolling that instance into teleport. + Instances map[string]*DiscoverEC2InstanceWithURLs `json:"clusters,omitempty"` +} + +// DiscoverEC2InstanceWithURLs contains the result of enrolling an AWS EC2 Instance. +type DiscoverEC2InstanceWithURLs struct { + *usertasksv1.DiscoverEC2Instance + + // ResourceURL is the Amazon Web Console URL to access this EC2 Instance. + // Always present. 
+ // Format: https://console.aws.amazon.com/ec2/home?region=#InstanceDetails:instanceId= + ResourceURL string `json:"resourceUrl,omitempty"` +} + +func withEC2InstanceIssueURL(metadata *usertasksv1.UserTask, instance *usertasksv1.DiscoverEC2Instance) *DiscoverEC2InstanceWithURLs { + ret := &DiscoverEC2InstanceWithURLs{ + DiscoverEC2Instance: instance, + } + instanceBaseURL := url.URL{ + Scheme: "https", + Host: "console.aws.amazon.com", + Path: path.Join("ec2", "home"), + Fragment: "InstanceDetails:instanceId=" + instance.GetInstanceId(), + RawQuery: url.Values{ + "region": []string{metadata.Spec.DiscoverEc2.GetRegion()}, + }.Encode(), + } + ret.ResourceURL = instanceBaseURL.String() + + return ret +} + +// EC2InstancesWithURLs takes a UserTask and enriches the instance list with URLs. +// Currently, the following URLs will be added: +// - ResourceURL: a link to open the instance in Amazon Web Console. +func EC2InstancesWithURLs(ut *usertasksv1.UserTask) *UserTaskDiscoverEC2WithURLs { + instances := ut.Spec.GetDiscoverEc2().GetInstances() + instancesWithURLs := make(map[string]*DiscoverEC2InstanceWithURLs, len(instances)) + + for instanceID, instance := range instances { + instancesWithURLs[instanceID] = withEC2InstanceIssueURL(ut, instance) + } + + return &UserTaskDiscoverEC2WithURLs{ + DiscoverEC2: ut.Spec.GetDiscoverEc2(), + Instances: instancesWithURLs, + } +} diff --git a/lib/usertasks/urls_test.go b/lib/usertasks/urls_test.go new file mode 100644 index 0000000000000..74f8e6c065fb3 --- /dev/null +++ b/lib/usertasks/urls_test.go @@ -0,0 +1,151 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package usertasks + +import ( + "testing" + + "github.com/stretchr/testify/require" + + usertasksv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/usertasks/v1" + usertasksapi "github.com/gravitational/teleport/api/types/usertasks" +) + +func TestEKSURLs(t *testing.T) { + clusterName := "my-cluster" + dummyCluster := &usertasksv1.DiscoverEKSCluster{Name: clusterName} + baseClusterData := &usertasksv1.DiscoverEKS{ + Region: "us-east-1", + Clusters: map[string]*usertasksv1.DiscoverEKSCluster{ + clusterName: dummyCluster, + }, + } + + for _, tt := range []struct { + name string + issueType string + expectedEKSClusterWithURL *DiscoverEKSClusterWithURLs + expected *UserTaskDiscoverEKSWithURLs + }{ + { + name: "url for eks agent not connecting", + issueType: usertasksapi.AutoDiscoverEKSIssueAgentNotConnecting, + expectedEKSClusterWithURL: &DiscoverEKSClusterWithURLs{ + ResourceURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster", + OpenTeleportAgentURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster/statefulsets/teleport-kube-agent?namespace=teleport-agent", + }, + }, + { + name: "url for eks authentication mode unsupported", + issueType: usertasksapi.AutoDiscoverEKSIssueAuthenticationModeUnsupported, + expectedEKSClusterWithURL: &DiscoverEKSClusterWithURLs{ + ResourceURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster", + ManageAccessURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster/manage-access", + }, + }, + { + name: "url for eks cluster 
unreachable", + issueType: usertasksapi.AutoDiscoverEKSIssueClusterUnreachable, + expectedEKSClusterWithURL: &DiscoverEKSClusterWithURLs{ + ResourceURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster", + ManageEndpointAccessURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster/manage-endpoint-access", + }, + }, + { + name: "url for eks missing endpoint public access", + issueType: usertasksapi.AutoDiscoverEKSIssueMissingEndpoingPublicAccess, + expectedEKSClusterWithURL: &DiscoverEKSClusterWithURLs{ + ResourceURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster", + ManageEndpointAccessURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster/manage-endpoint-access", + }, + }, + { + name: "url for eks cluster status not active", + issueType: usertasksapi.AutoDiscoverEKSIssueStatusNotActive, + expectedEKSClusterWithURL: &DiscoverEKSClusterWithURLs{ + ResourceURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster", + ManageClusterURL: "https://console.aws.amazon.com/eks/home?region=us-east-1#/clusters/my-cluster", + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + clusterWithURL := tt.expectedEKSClusterWithURL + clusterWithURL.DiscoverEKSCluster = dummyCluster + expected := &UserTaskDiscoverEKSWithURLs{ + DiscoverEKS: baseClusterData, + Clusters: map[string]*DiscoverEKSClusterWithURLs{ + clusterName: clusterWithURL, + }, + } + + got := EKSClustersWithURLs(&usertasksv1.UserTask{ + Spec: &usertasksv1.UserTaskSpec{ + IssueType: tt.issueType, + DiscoverEks: baseClusterData, + }, + }) + require.Equal(t, expected, got) + }) + } +} + +func TestEC2URLs(t *testing.T) { + instanceID := "i-12345678" + dummyInstance := &usertasksv1.DiscoverEC2Instance{InstanceId: instanceID} + baseInstancesData := &usertasksv1.DiscoverEC2{ + Region: "us-east-1", + Instances: map[string]*usertasksv1.DiscoverEC2Instance{ + instanceID: dummyInstance, 
+ }, + } + + for _, tt := range []struct { + name string + issueType string + expectedEC2InstanceWithURL *DiscoverEC2InstanceWithURLs + expected *UserTaskDiscoverEC2WithURLs + }{ + { + name: "url for ec2 resource", + issueType: usertasksapi.AutoDiscoverEC2IssueSSMScriptFailure, + expectedEC2InstanceWithURL: &DiscoverEC2InstanceWithURLs{ + ResourceURL: "https://console.aws.amazon.com/ec2/home?region=us-east-1#InstanceDetails:instanceId=i-12345678", + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + instanceWithURL := tt.expectedEC2InstanceWithURL + instanceWithURL.DiscoverEC2Instance = dummyInstance + expected := &UserTaskDiscoverEC2WithURLs{ + DiscoverEC2: baseInstancesData, + Instances: map[string]*DiscoverEC2InstanceWithURLs{ + instanceID: instanceWithURL, + }, + } + + got := EC2InstancesWithURLs(&usertasksv1.UserTask{ + Spec: &usertasksv1.UserTaskSpec{ + IssueType: tt.issueType, + DiscoverEc2: baseInstancesData, + }, + }) + require.Equal(t, expected, got) + }) + } +} diff --git a/lib/web/ui/usertask.go b/lib/web/ui/usertask.go index 02b174aeb1782..14fc86a8bd71f 100644 --- a/lib/web/ui/usertask.go +++ b/lib/web/ui/usertask.go @@ -24,6 +24,7 @@ import ( "github.com/gravitational/trace" usertasksv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/usertasks/v1" + apiusertasks "github.com/gravitational/teleport/api/types/usertasks" "github.com/gravitational/teleport/lib/usertasks" ) @@ -51,7 +52,9 @@ type UserTaskDetail struct { // Description is a markdown document that explains the issue and how to fix it. Description string `json:"description,omitempty"` // DiscoverEC2 contains the task details for the DiscoverEC2 tasks. - DiscoverEC2 *usertasksv1.DiscoverEC2 `json:"discoverEc2,omitempty"` + DiscoverEC2 *usertasks.UserTaskDiscoverEC2WithURLs `json:"discoverEc2,omitempty"` + // DiscoverEKS contains the task details for the DiscoverEKS tasks. 
+ DiscoverEKS *usertasks.UserTaskDiscoverEKSWithURLs `json:"discoverEks,omitempty"` } // UpdateUserTaskStateRequest is a request to update a UserTask @@ -92,10 +95,24 @@ func MakeUserTasks(uts []*usertasksv1.UserTask) []UserTask { // MakeDetailedUserTask creates a UI UserTask representation containing all the details. func MakeDetailedUserTask(ut *usertasksv1.UserTask) UserTaskDetail { + var description string + var discoverEKS *usertasks.UserTaskDiscoverEKSWithURLs + var discoverEC2 *usertasks.UserTaskDiscoverEC2WithURLs + + switch ut.GetSpec().GetTaskType() { + case apiusertasks.TaskTypeDiscoverEC2: + description = usertasks.DescriptionForDiscoverEC2Issue(ut.GetSpec().GetIssueType()) + discoverEC2 = usertasks.EC2InstancesWithURLs(ut) + case apiusertasks.TaskTypeDiscoverEKS: + description = usertasks.DescriptionForDiscoverEKSIssue(ut.GetSpec().GetIssueType()) + discoverEKS = usertasks.EKSClustersWithURLs(ut) + } + return UserTaskDetail{ UserTask: MakeUserTask(ut), - Description: usertasks.DescriptionForDiscoverEC2Issue(ut.GetSpec().GetIssueType()), - DiscoverEC2: ut.GetSpec().GetDiscoverEc2(), + Description: description, + DiscoverEC2: discoverEC2, + DiscoverEKS: discoverEKS, } } From 2aed5bf6da0e38721d85e8d43baad57cc5c44745 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 9 Jan 2025 11:41:28 -0500 Subject: [PATCH 27/45] Remove logrus (#50829) * Remove all direct logrus usage from teleport module The only remaining use of logrus is from integrations, which is unfortunately imported by teleport.e, and prevents logrus from being moved to an indirect dependency. The logrus formatter and initialization of the logrus logger will remain in place until integrations is using slog. To prevent any accidental inclusions of logrus within the teleport module the depguard rules have been updated to prohibit importing logrus. 
The rules also prohibit a few common log packages that tools like gopls might automatically import. * Refactor logger initialization Consolidates configuring of global loggers to a single function. This is mainly to facilitate configuring the logger for teleport scp, but will also allow us to remove the copy of logger initialization that currently exists in integrations/lib/logger. * fix: document ValidateFields * fix: remove copied yaml tags * fix: update file path comment --- .golangci.yml | 29 ++++-- e | 2 +- integration/helpers/instance.go | 9 -- integration/hostuser_test.go | 4 +- integrations/lib/logger/logger.go | 21 ++--- lib/client/api.go | 20 +++- lib/config/configuration.go | 81 ++-------------- lib/service/service.go | 5 - lib/service/service_test.go | 1 - lib/service/servicecfg/config.go | 15 --- lib/service/servicecfg/config_test.go | 23 ++--- lib/utils/log/formatter_test.go | 2 +- lib/utils/log/handle_state.go | 8 -- lib/utils/log/levels.go | 54 ----------- lib/utils/log/log.go | 128 ++++++++++++++++++++++++++ lib/utils/log/logrus_formatter.go | 61 ------------ lib/utils/log/slog.go | 92 ++++++++++++++++++ lib/utils/log/slog_text_handler.go | 7 -- lib/utils/log/syslog.go | 35 +++++++ lib/utils/{ => log}/syslog_windows.go | 8 +- lib/utils/syslog.go | 77 ---------------- tool/tbot/spiffe.go | 32 ++++++- tool/tctl/common/resource_command.go | 22 ++--- tool/teleport/common/teleport.go | 16 +++- tool/teleport/testenv/test_server.go | 2 +- tool/tsh/common/tsh_helper_test.go | 4 +- tool/tsh/common/tsh_test.go | 4 +- 27 files changed, 380 insertions(+), 382 deletions(-) delete mode 100644 lib/utils/log/levels.go create mode 100644 lib/utils/log/log.go create mode 100644 lib/utils/log/syslog.go rename lib/utils/{ => log}/syslog_windows.go (81%) delete mode 100644 lib/utils/syslog.go diff --git a/.golangci.yml b/.golangci.yml index 229a5838f2462..98859bad6c7d9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -99,10 +99,6 @@ linters-settings: desc: 
'use "github.com/google/uuid" instead' - pkg: github.com/pborman/uuid desc: 'use "github.com/google/uuid" instead' - - pkg: github.com/siddontang/go-log/log - desc: 'use "github.com/sirupsen/logrus" instead' - - pkg: github.com/siddontang/go/log - desc: 'use "github.com/sirupsen/logrus" instead' - pkg: github.com/tj/assert desc: 'use "github.com/stretchr/testify/assert" instead' - pkg: go.uber.org/atomic @@ -117,16 +113,29 @@ linters-settings: desc: 'use "github.com/gravitational/teleport/lib/msgraph" instead' - pkg: github.com/cloudflare/cfssl desc: 'use "crypto" or "x/crypto" instead' - # Prevent logrus from being imported by api and e. Once everything in teleport has been converted - # to use log/slog this should be moved into the main block above. - logrus: + # Prevent importing any additional logging libraries. + logging: files: - - '**/api/**' - - '**/e/**' - - '**/lib/srv/**' + # Integrations are still allowed to use logrus because they haven't + # been converted to slog yet. Once they use slog, remove this exception. + - '!**/integrations/**' + # The log package still contains the logrus formatter consumed by the integrations. + # Remove this exception when said formatter is deleted. + - '!**/lib/utils/log/**' + - '!**/lib/utils/cli.go' deny: - pkg: github.com/sirupsen/logrus desc: 'use "log/slog" instead' + - pkg: github.com/siddontang/go-log/log + desc: 'use "log/slog" instead' + - pkg: github.com/siddontang/go/log + desc: 'use "log/slog" instead' + - pkg: github.com/mailgun/log + desc: 'use "log/slog" instead' + - pkg: github.com/saferwall/pe/log + desc: 'use "log/slog" instead' + - pkg: golang.org/x/exp/slog + desc: 'use "log/slog" instead' # Prevent importing internal packages in client tools or packages containing # common interfaces consumed by them that are known to bloat binaries or break builds # because they only support a single platform. 
diff --git a/e b/e index b486de24a443a..498f643ea9033 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit b486de24a443a9f8eb3e349009af14d11814ff5c +Subproject commit 498f643ea9033b1235359d83c310caadb18305d2 diff --git a/integration/helpers/instance.go b/integration/helpers/instance.go index 5f652c77b4eea..6d375387a02f6 100644 --- a/integration/helpers/instance.go +++ b/integration/helpers/instance.go @@ -40,7 +40,6 @@ import ( "github.com/gorilla/websocket" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "golang.org/x/crypto/ssh" @@ -327,10 +326,6 @@ type InstanceConfig struct { Priv []byte // Pub is SSH public key of the instance Pub []byte - // Log specifies the logger - // Deprecated: Use Logger instead - // TODO(tross): Delete when e is updated - Log utils.Logger // Logger specifies the logger Logger *slog.Logger // Ports is a collection of instance ports. @@ -354,10 +349,6 @@ func NewInstance(t *testing.T, cfg InstanceConfig) *TeleInstance { cfg.Listeners = StandardListenerSetup(t, &cfg.Fds) } - if cfg.Log == nil { - cfg.Log = logrus.New() - } - if cfg.Logger == nil { cfg.Logger = slog.New(logutils.DiscardHandler{}) } diff --git a/integration/hostuser_test.go b/integration/hostuser_test.go index b5b045c2840b3..f917a95f872a5 100644 --- a/integration/hostuser_test.go +++ b/integration/hostuser_test.go @@ -637,7 +637,7 @@ func TestRootLoginAsHostUser(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) // Create a user that can create a host user. 
@@ -735,7 +735,7 @@ func TestRootStaticHostUsers(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) require.NoError(t, instance.Create(t, nil, false, nil)) diff --git a/integrations/lib/logger/logger.go b/integrations/lib/logger/logger.go index a101727ef31e6..7422f03ff906c 100644 --- a/integrations/lib/logger/logger.go +++ b/integrations/lib/logger/logger.go @@ -34,15 +34,6 @@ import ( logutils "github.com/gravitational/teleport/lib/utils/log" ) -// These values are meant to be kept in sync with teleport/lib/config. -// (We avoid importing that package here because integrations must not require CGo) -const ( - // logFileDefaultMode is the preferred permissions mode for log file. - logFileDefaultMode fs.FileMode = 0o644 - // logFileDefaultFlag is the preferred flags set to log file. - logFileDefaultFlag = os.O_WRONLY | os.O_CREATE | os.O_APPEND -) - type Config struct { Output string `toml:"output"` Severity string `toml:"severity"` @@ -108,8 +99,16 @@ func Setup(conf Config) error { } // NewSLogLogger builds a slog.Logger from the logger.Config. -// TODO: this code is adapted from `config.applyLogConfig`, we'll want to deduplicate the logic next time we refactor the logging setup +// TODO(tross): Defer logging initialization to logutils.Initialize and use the +// global slog loggers once integrations has been updated to use slog. func (conf Config) NewSLogLogger() (*slog.Logger, error) { + const ( + // logFileDefaultMode is the preferred permissions mode for log file. + logFileDefaultMode fs.FileMode = 0o644 + // logFileDefaultFlag is the preferred flags set to log file. 
+ logFileDefaultFlag = os.O_WRONLY | os.O_CREATE | os.O_APPEND + ) + var w io.Writer switch conf.Output { case "": @@ -120,7 +119,7 @@ func (conf Config) NewSLogLogger() (*slog.Logger, error) { w = logutils.NewSharedWriter(os.Stdout) case teleport.Syslog: w = os.Stderr - sw, err := utils.NewSyslogWriter() + sw, err := logutils.NewSyslogWriter() if err != nil { slog.Default().ErrorContext(context.Background(), "Failed to switch logging to syslog", "error", err) break diff --git a/lib/client/api.go b/lib/client/api.go index 68084d4833089..ed94462aa9c73 100644 --- a/lib/client/api.go +++ b/lib/client/api.go @@ -35,6 +35,7 @@ import ( "slices" "strconv" "strings" + "sync" "sync/atomic" "time" "unicode/utf8" @@ -2850,6 +2851,21 @@ type execResult struct { exitStatus int } +// sharedWriter is an [io.Writer] implementation that protects +// writes with a mutex. This allows a single [io.Writer] to be shared +// by both logrus and slog without their output clobbering each other. +type sharedWriter struct { + mu sync.Mutex + io.Writer +} + +func (s *sharedWriter) Write(p []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.Writer.Write(p) +} + // runCommandOnNodes executes a given bash command on a bunch of remote nodes. 
func (tc *TeleportClient) runCommandOnNodes(ctx context.Context, clt *ClusterClient, nodes []TargetNode, command []string) error { cluster := clt.ClusterName() @@ -2909,10 +2925,10 @@ func (tc *TeleportClient) runCommandOnNodes(ctx context.Context, clt *ClusterCli } } - stdout := logutils.NewSharedWriter(tc.Stdout) + stdout := &sharedWriter{Writer: tc.Stdout} stderr := stdout if tc.Stdout != tc.Stderr { - stderr = logutils.NewSharedWriter(tc.Stderr) + stderr = &sharedWriter{Writer: tc.Stderr} } for _, node := range nodes { diff --git a/lib/config/configuration.go b/lib/config/configuration.go index 546a48700d4fe..dda2ac6859cf4 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -27,7 +27,6 @@ import ( "crypto/x509" "errors" "io" - "io/fs" "log/slog" "maps" "net" @@ -72,13 +71,6 @@ import ( logutils "github.com/gravitational/teleport/lib/utils/log" ) -const ( - // logFileDefaultMode is the preferred permissions mode for log file. - logFileDefaultMode fs.FileMode = 0o644 - // logFileDefaultFlag is the preferred flags set to log file. 
- logFileDefaultFlag = os.O_WRONLY | os.O_CREATE | os.O_APPEND -) - // CommandLineFlags stores command line flag values, it's a much simplified subset // of Teleport configuration (which is fully expressed via YAML config file) type CommandLineFlags struct { @@ -789,79 +781,22 @@ func applyAuthOrProxyAddress(fc *FileConfig, cfg *servicecfg.Config) error { } func applyLogConfig(loggerConfig Log, cfg *servicecfg.Config) error { - // TODO: this code is copied in the access plugin logging setup `logger.Config.NewSLogLogger` - // We'll want to deduplicate the logic next time we refactor the logging setup - var w io.Writer switch loggerConfig.Output { - case "": - w = os.Stderr - case "stderr", "error", "2": - w = os.Stderr - cfg.Console = io.Discard // disable console printing - case "stdout", "out", "1": - w = os.Stdout + case "stderr", "error", "2", "stdout", "out", "1": cfg.Console = io.Discard // disable console printing - case teleport.Syslog: - var err error - w, err = utils.NewSyslogWriter() - if err != nil { - slog.ErrorContext(context.Background(), "Failed to switch logging to syslog", "error", err) - break - } - default: - // Assume this is a file path. 
- sharedWriter, err := logutils.NewFileSharedWriter(loggerConfig.Output, logFileDefaultFlag, logFileDefaultMode) - if err != nil { - return trace.Wrap(err, "failed to init the log file shared writer") - } - w = logutils.NewWriterFinalizer[*logutils.FileSharedWriter](sharedWriter) - if err := sharedWriter.RunWatcherReopen(context.Background()); err != nil { - return trace.Wrap(err) - } - } - - level := new(slog.LevelVar) - switch strings.ToLower(loggerConfig.Severity) { - case "", "info": - level.Set(slog.LevelInfo) - case "err", "error": - level.Set(slog.LevelError) - case teleport.DebugLevel: - level.Set(slog.LevelDebug) - case "warn", "warning": - level.Set(slog.LevelWarn) - case "trace": - level.Set(logutils.TraceLevel) - default: - return trace.BadParameter("unsupported logger severity: %q", loggerConfig.Severity) } - configuredFields, err := logutils.ValidateFields(loggerConfig.Format.ExtraFields) + logger, level, err := logutils.Initialize(logutils.Config{ + Output: loggerConfig.Output, + Severity: loggerConfig.Severity, + Format: loggerConfig.Format.Output, + ExtraFields: loggerConfig.Format.ExtraFields, + EnableColors: utils.IsTerminal(os.Stderr), + }) if err != nil { return trace.Wrap(err) } - var logger *slog.Logger - switch strings.ToLower(loggerConfig.Format.Output) { - case "": - fallthrough // not set. 
defaults to 'text' - case "text": - logger = slog.New(logutils.NewSlogTextHandler(w, logutils.SlogTextHandlerConfig{ - Level: level, - EnableColors: utils.IsTerminal(os.Stderr), - ConfiguredFields: configuredFields, - })) - slog.SetDefault(logger) - case "json": - logger = slog.New(logutils.NewSlogJSONHandler(w, logutils.SlogJSONHandlerConfig{ - Level: level, - ConfiguredFields: configuredFields, - })) - slog.SetDefault(logger) - default: - return trace.BadParameter("unsupported log output format : %q", loggerConfig.Format.Output) - } - cfg.Logger = logger cfg.LoggerLevel = level return nil diff --git a/lib/service/service.go b/lib/service/service.go index 51d171f3737b3..7638ee5e85caf 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -56,7 +56,6 @@ import ( "github.com/jonboulle/clockwork" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/quic-go/quic-go" - "github.com/sirupsen/logrus" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel/attribute" "golang.org/x/crypto/acme" @@ -992,10 +991,6 @@ func NewTeleport(cfg *servicecfg.Config) (*TeleportProcess, error) { } processID := fmt.Sprintf("%v", nextProcessID()) - cfg.Log = utils.WrapLogger(cfg.Log.WithFields(logrus.Fields{ - teleport.ComponentKey: teleport.Component(teleport.ComponentProcess, processID), - "pid": fmt.Sprintf("%v.%v", os.Getpid(), processID), - })) cfg.Logger = cfg.Logger.With( teleport.ComponentKey, teleport.Component(teleport.ComponentProcess, processID), "pid", fmt.Sprintf("%v.%v", os.Getpid(), processID), diff --git a/lib/service/service_test.go b/lib/service/service_test.go index 38ee9918008c4..52e59387ff580 100644 --- a/lib/service/service_test.go +++ b/lib/service/service_test.go @@ -948,7 +948,6 @@ func TestTeleportProcess_reconnectToAuth(t *testing.T) { cfg.Testing.ConnectFailureC = make(chan time.Duration, 5) cfg.Testing.ClientTimeout = time.Millisecond cfg.InstanceMetadataClient = 
imds.NewDisabledIMDSClient() - cfg.Log = utils.NewLoggerForTests() cfg.Logger = utils.NewSlogLoggerForTests() process, err := NewTeleport(cfg) require.NoError(t, err) diff --git a/lib/service/servicecfg/config.go b/lib/service/servicecfg/config.go index 6a14f1ceba5d0..a89e79a8f6302 100644 --- a/lib/service/servicecfg/config.go +++ b/lib/service/servicecfg/config.go @@ -34,7 +34,6 @@ import ( "github.com/ghodss/yaml" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" "github.com/gravitational/teleport" @@ -52,7 +51,6 @@ import ( "github.com/gravitational/teleport/lib/sshca" usagereporter "github.com/gravitational/teleport/lib/usagereporter/teleport" "github.com/gravitational/teleport/lib/utils" - logutils "github.com/gravitational/teleport/lib/utils/log" ) // Config contains the configuration for all services that Teleport can run. @@ -223,10 +221,6 @@ type Config struct { // Kube is a Kubernetes API gateway using Teleport client identities. Kube KubeConfig - // Log optionally specifies the logger. - // Deprecated: use Logger instead. - Log utils.Logger - // Logger outputs messages using slog. The underlying handler respects // the user supplied logging config. Logger *slog.Logger @@ -518,10 +512,6 @@ func ApplyDefaults(cfg *Config) { cfg.Version = defaults.TeleportConfigVersionV1 - if cfg.Log == nil { - cfg.Log = utils.NewLogger() - } - if cfg.Logger == nil { cfg.Logger = slog.Default() } @@ -698,10 +688,6 @@ func applyDefaults(cfg *Config) { cfg.Console = io.Discard } - if cfg.Log == nil { - cfg.Log = logrus.StandardLogger() - } - if cfg.Logger == nil { cfg.Logger = slog.Default() } @@ -799,7 +785,6 @@ func verifyEnabledService(cfg *Config) error { // If called after `config.ApplyFileConfig` or `config.Configure` it will also // change the global loggers. 
func (c *Config) SetLogLevel(level slog.Level) { - c.Log.SetLevel(logutils.SlogLevelToLogrusLevel(level)) c.LoggerLevel.Set(level) } diff --git a/lib/service/servicecfg/config_test.go b/lib/service/servicecfg/config_test.go index e9a6be2df4056..8ed785c0998f9 100644 --- a/lib/service/servicecfg/config_test.go +++ b/lib/service/servicecfg/config_test.go @@ -28,7 +28,6 @@ import ( "testing" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" @@ -651,44 +650,34 @@ func TestWebPublicAddr(t *testing.T) { func TestSetLogLevel(t *testing.T) { for _, test := range []struct { - logLevel slog.Level - expectedLogrusLevel logrus.Level + logLevel slog.Level }{ { - logLevel: logutils.TraceLevel, - expectedLogrusLevel: logrus.TraceLevel, + logLevel: logutils.TraceLevel, }, { - logLevel: slog.LevelDebug, - expectedLogrusLevel: logrus.DebugLevel, + logLevel: slog.LevelDebug, }, { - logLevel: slog.LevelInfo, - expectedLogrusLevel: logrus.InfoLevel, + logLevel: slog.LevelInfo, }, { - logLevel: slog.LevelWarn, - expectedLogrusLevel: logrus.WarnLevel, + logLevel: slog.LevelWarn, }, { - logLevel: slog.LevelError, - expectedLogrusLevel: logrus.ErrorLevel, + logLevel: slog.LevelError, }, } { t.Run(test.logLevel.String(), func(t *testing.T) { // Create a configuration with local loggers to avoid modifying the // global instances. 
c := &Config{ - Log: logrus.New(), Logger: slog.New(logutils.NewSlogTextHandler(io.Discard, logutils.SlogTextHandlerConfig{})), } ApplyDefaults(c) c.SetLogLevel(test.logLevel) require.Equal(t, test.logLevel, c.LoggerLevel.Level()) - require.IsType(t, &logrus.Logger{}, c.Log) - l, _ := c.Log.(*logrus.Logger) - require.Equal(t, test.expectedLogrusLevel, l.GetLevel()) }) } } diff --git a/lib/utils/log/formatter_test.go b/lib/utils/log/formatter_test.go index e11a9f63620fb..9abb0310ba0be 100644 --- a/lib/utils/log/formatter_test.go +++ b/lib/utils/log/formatter_test.go @@ -51,7 +51,7 @@ var ( logErr = errors.New("the quick brown fox jumped really high") addr = fakeAddr{addr: "127.0.0.1:1234"} - fields = logrus.Fields{ + fields = map[string]any{ "local": &addr, "remote": &addr, "login": "llama", diff --git a/lib/utils/log/handle_state.go b/lib/utils/log/handle_state.go index c8ac9913781ca..3f88e100933ac 100644 --- a/lib/utils/log/handle_state.go +++ b/lib/utils/log/handle_state.go @@ -14,7 +14,6 @@ import ( "time" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" ) @@ -114,13 +113,6 @@ func (s *handleState) appendAttr(a slog.Attr) bool { } } return nonEmpty - case logrus.Fields: - for k, v := range fields { - if s.appendAttr(slog.Any(k, v)) { - nonEmpty = true - } - } - return nonEmpty } } diff --git a/lib/utils/log/levels.go b/lib/utils/log/levels.go deleted file mode 100644 index 747561ffb155b..0000000000000 --- a/lib/utils/log/levels.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package log - -import ( - "log/slog" - - "github.com/sirupsen/logrus" -) - -// SupportedLevelsText lists the supported log levels in their text -// representation. All strings are in uppercase. -var SupportedLevelsText = []string{ - TraceLevelText, - slog.LevelDebug.String(), - slog.LevelInfo.String(), - slog.LevelWarn.String(), - slog.LevelError.String(), -} - -// SlogLevelToLogrusLevel converts a [slog.Level] to its equivalent -// [logrus.Level]. -func SlogLevelToLogrusLevel(level slog.Level) logrus.Level { - switch level { - case TraceLevel: - return logrus.TraceLevel - case slog.LevelDebug: - return logrus.DebugLevel - case slog.LevelInfo: - return logrus.InfoLevel - case slog.LevelWarn: - return logrus.WarnLevel - case slog.LevelError: - return logrus.ErrorLevel - default: - return logrus.FatalLevel - } -} diff --git a/lib/utils/log/log.go b/lib/utils/log/log.go new file mode 100644 index 0000000000000..2f16b902e3df6 --- /dev/null +++ b/lib/utils/log/log.go @@ -0,0 +1,128 @@ +// Teleport +// Copyright (C) 2025 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package log + +import ( + "context" + "io" + "io/fs" + "log/slog" + "os" + "strings" + + "github.com/gravitational/trace" + + "github.com/gravitational/teleport" +) + +// Config configures teleport logging +type Config struct { + // Output defines where logs go. It can be one of the following: "stderr", "stdout" or + // a path to a log file + Output string + // Severity defines how verbose the log will be. Possible values are "error", "info", "warn" + Severity string + // Format defines the output format. Possible values are 'text' and 'json'. + Format string + // ExtraFields lists the output fields from KnownFormatFields. Example format: [timestamp, component, caller] + ExtraFields []string + // EnableColors dictates if output should be colored. + EnableColors bool +} + +// Initialize configures the default global logger based on the +// provided configuration. The [slog.Logger] and [slog.LevelVar] +func Initialize(loggerConfig Config) (*slog.Logger, *slog.LevelVar, error) { + const ( + // logFileDefaultMode is the preferred permissions mode for log file. + logFileDefaultMode fs.FileMode = 0o644 + // logFileDefaultFlag is the preferred flags set to log file. + logFileDefaultFlag = os.O_WRONLY | os.O_CREATE | os.O_APPEND + ) + + var w io.Writer + level := new(slog.LevelVar) + switch loggerConfig.Output { + case "": + w = os.Stderr + case "stderr", "error", "2": + w = os.Stderr + case "stdout", "out", "1": + w = os.Stdout + case teleport.Syslog: + var err error + w, err = NewSyslogWriter() + if err != nil { + slog.ErrorContext(context.Background(), "Failed to switch logging to syslog", "error", err) + slog.SetDefault(slog.New(DiscardHandler{})) + return slog.Default(), level, nil + } + default: + // Assume a file path for all other provided output values. 
+ sharedWriter, err := NewFileSharedWriter(loggerConfig.Output, logFileDefaultFlag, logFileDefaultMode) + if err != nil { + return nil, nil, trace.Wrap(err, "failed to init the log file shared writer") + } + w = NewWriterFinalizer(sharedWriter) + if err := sharedWriter.RunWatcherReopen(context.Background()); err != nil { + return nil, nil, trace.Wrap(err) + } + } + + switch strings.ToLower(loggerConfig.Severity) { + case "", "info": + level.Set(slog.LevelInfo) + case "err", "error": + level.Set(slog.LevelError) + case teleport.DebugLevel: + level.Set(slog.LevelDebug) + case "warn", "warning": + level.Set(slog.LevelWarn) + case "trace": + level.Set(TraceLevel) + default: + return nil, nil, trace.BadParameter("unsupported logger severity: %q", loggerConfig.Severity) + } + + configuredFields, err := ValidateFields(loggerConfig.ExtraFields) + if err != nil { + return nil, nil, trace.Wrap(err) + } + + var logger *slog.Logger + switch strings.ToLower(loggerConfig.Format) { + case "": + fallthrough // not set. 
defaults to 'text' + case "text": + logger = slog.New(NewSlogTextHandler(w, SlogTextHandlerConfig{ + Level: level, + EnableColors: loggerConfig.EnableColors, + ConfiguredFields: configuredFields, + })) + slog.SetDefault(logger) + case "json": + logger = slog.New(NewSlogJSONHandler(w, SlogJSONHandlerConfig{ + Level: level, + ConfiguredFields: configuredFields, + })) + slog.SetDefault(logger) + default: + return nil, nil, trace.BadParameter("unsupported log output format : %q", loggerConfig.Format) + } + + return logger, level, nil +} diff --git a/lib/utils/log/logrus_formatter.go b/lib/utils/log/logrus_formatter.go index a21d922adf809..14ad8441da7cc 100644 --- a/lib/utils/log/logrus_formatter.go +++ b/lib/utils/log/logrus_formatter.go @@ -25,7 +25,6 @@ import ( "slices" "strconv" "strings" - "unicode" "github.com/gravitational/trace" "github.com/sirupsen/logrus" @@ -76,27 +75,6 @@ func (w *writer) Bytes() []byte { return *w.b } -const ( - noColor = -1 - red = 31 - yellow = 33 - blue = 36 - gray = 37 - // LevelField is the log field that stores the verbosity. - LevelField = "level" - // ComponentField is the log field that stores the calling component. - ComponentField = "component" - // CallerField is the log field that stores the calling file and line number. - CallerField = "caller" - // TimestampField is the field that stores the timestamp the log was emitted. - TimestampField = "timestamp" - messageField = "message" - // defaultComponentPadding is a default padding for component field - defaultComponentPadding = 11 - // defaultLevelPadding is a default padding for level field - defaultLevelPadding = 4 -) - // NewDefaultTextFormatter creates a TextFormatter with // the default options set. 
func NewDefaultTextFormatter(enableColors bool) *TextFormatter { @@ -304,15 +282,6 @@ func (w *writer) writeError(value interface{}) { } } -func padMax(in string, chars int) string { - switch { - case len(in) < chars: - return in + strings.Repeat(" ", chars-len(in)) - default: - return in[:chars] - } -} - func (w *writer) writeField(value interface{}, color int) { if w.Len() > 0 { w.WriteByte(' ') @@ -456,33 +425,3 @@ func frameToTrace(frame runtime.Frame) trace.Trace { Line: frame.Line, } } - -var defaultFormatFields = []string{LevelField, ComponentField, CallerField, TimestampField} - -var knownFormatFields = map[string]struct{}{ - LevelField: {}, - ComponentField: {}, - CallerField: {}, - TimestampField: {}, -} - -func ValidateFields(formatInput []string) (result []string, err error) { - for _, component := range formatInput { - component = strings.TrimSpace(component) - if _, ok := knownFormatFields[component]; !ok { - return nil, trace.BadParameter("invalid log format key: %q", component) - } - result = append(result, component) - } - return result, nil -} - -// needsQuoting returns true if any non-printable characters are found. -func needsQuoting(text string) bool { - for _, r := range text { - if !unicode.IsPrint(r) { - return true - } - } - return false -} diff --git a/lib/utils/log/slog.go b/lib/utils/log/slog.go index b1b0678ec5487..46f0e13627b3e 100644 --- a/lib/utils/log/slog.go +++ b/lib/utils/log/slog.go @@ -24,7 +24,10 @@ import ( "log/slog" "reflect" "strings" + "unicode" + "github.com/gravitational/trace" + "github.com/sirupsen/logrus" oteltrace "go.opentelemetry.io/otel/trace" ) @@ -34,8 +37,56 @@ const ( // TraceLevelText is the text representation of Trace verbosity. TraceLevelText = "TRACE" + + noColor = -1 + red = 31 + yellow = 33 + blue = 36 + gray = 37 + // LevelField is the log field that stores the verbosity. + LevelField = "level" + // ComponentField is the log field that stores the calling component. 
+ ComponentField = "component" + // CallerField is the log field that stores the calling file and line number. + CallerField = "caller" + // TimestampField is the field that stores the timestamp the log was emitted. + TimestampField = "timestamp" + messageField = "message" + // defaultComponentPadding is a default padding for component field + defaultComponentPadding = 11 + // defaultLevelPadding is a default padding for level field + defaultLevelPadding = 4 ) +// SupportedLevelsText lists the supported log levels in their text +// representation. All strings are in uppercase. +var SupportedLevelsText = []string{ + TraceLevelText, + slog.LevelDebug.String(), + slog.LevelInfo.String(), + slog.LevelWarn.String(), + slog.LevelError.String(), +} + +// SlogLevelToLogrusLevel converts a [slog.Level] to its equivalent +// [logrus.Level]. +func SlogLevelToLogrusLevel(level slog.Level) logrus.Level { + switch level { + case TraceLevel: + return logrus.TraceLevel + case slog.LevelDebug: + return logrus.DebugLevel + case slog.LevelInfo: + return logrus.InfoLevel + case slog.LevelWarn: + return logrus.WarnLevel + case slog.LevelError: + return logrus.ErrorLevel + default: + return logrus.FatalLevel + } +} + // DiscardHandler is a [slog.Handler] that discards all messages. It // is more efficient than a [slog.Handler] which outputs to [io.Discard] since // it performs zero formatting. @@ -68,6 +119,47 @@ func addTracingContextToRecord(ctx context.Context, r *slog.Record) { } } +var defaultFormatFields = []string{LevelField, ComponentField, CallerField, TimestampField} + +var knownFormatFields = map[string]struct{}{ + LevelField: {}, + ComponentField: {}, + CallerField: {}, + TimestampField: {}, +} + +// ValidateFields ensures the provided fields map to the allowed fields. An error +// is returned if any of the fields are invalid. 
+func ValidateFields(formatInput []string) (result []string, err error) { + for _, component := range formatInput { + component = strings.TrimSpace(component) + if _, ok := knownFormatFields[component]; !ok { + return nil, trace.BadParameter("invalid log format key: %q", component) + } + result = append(result, component) + } + return result, nil +} + +// needsQuoting returns true if any non-printable characters are found. +func needsQuoting(text string) bool { + for _, r := range text { + if !unicode.IsPrint(r) { + return true + } + } + return false +} + +func padMax(in string, chars int) string { + switch { + case len(in) < chars: + return in + strings.Repeat(" ", chars-len(in)) + default: + return in[:chars] + } +} + // getCaller retrieves source information from the attribute // and returns the file and line of the caller. The file is // truncated from the absolute path to package/filename. diff --git a/lib/utils/log/slog_text_handler.go b/lib/utils/log/slog_text_handler.go index b3bc4900ac64c..7f93a388977bb 100644 --- a/lib/utils/log/slog_text_handler.go +++ b/lib/utils/log/slog_text_handler.go @@ -27,7 +27,6 @@ import ( "sync" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" ) @@ -324,12 +323,6 @@ func (s *SlogTextHandler) WithAttrs(attrs []slog.Attr) slog.Handler { nonEmpty = true } } - case logrus.Fields: - for k, v := range fields { - if state.appendAttr(slog.Any(k, v)) { - nonEmpty = true - } - } } default: if state.appendAttr(a) { diff --git a/lib/utils/log/syslog.go b/lib/utils/log/syslog.go new file mode 100644 index 0000000000000..40896ebedc767 --- /dev/null +++ b/lib/utils/log/syslog.go @@ -0,0 +1,35 @@ +//go:build !windows +// +build !windows + +/* + * Teleport + * Copyright (C) 2023 Gravitational, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package log + +import ( + "io" + "log/syslog" + + "github.com/gravitational/trace" +) + +// NewSyslogWriter creates a writer that outputs to the local machine syslog. +func NewSyslogWriter() (io.Writer, error) { + writer, err := syslog.Dial("", "", syslog.LOG_WARNING, "") + return writer, trace.Wrap(err) +} diff --git a/lib/utils/syslog_windows.go b/lib/utils/log/syslog_windows.go similarity index 81% rename from lib/utils/syslog_windows.go rename to lib/utils/log/syslog_windows.go index 7812dddabb237..3d359bdc1d437 100644 --- a/lib/utils/syslog_windows.go +++ b/lib/utils/log/syslog_windows.go @@ -16,20 +16,14 @@ * along with this program. If not, see . */ -package utils +package log import ( "io" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" ) -// NewSyslogHook always returns an error on Windows. -func NewSyslogHook(io.Writer) (logrus.Hook, error) { - return nil, trace.NotImplemented("cannot use syslog on Windows") -} - // NewSyslogWriter always returns an error on Windows. 
func NewSyslogWriter() (io.Writer, error) { return nil, trace.NotImplemented("cannot use syslog on Windows") diff --git a/lib/utils/syslog.go b/lib/utils/syslog.go deleted file mode 100644 index 86123bda5e1c0..0000000000000 --- a/lib/utils/syslog.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build !windows -// +build !windows - -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package utils - -import ( - "io" - "log/syslog" - "os" - - "github.com/gravitational/trace" - "github.com/sirupsen/logrus" - logrusSyslog "github.com/sirupsen/logrus/hooks/syslog" -) - -// SwitchLoggingToSyslog configures the default logger to send output to syslog. -func SwitchLoggingToSyslog() error { - logger := logrus.StandardLogger() - - w, err := NewSyslogWriter() - if err != nil { - logger.Errorf("Failed to switch logging to syslog: %v.", err) - logger.SetOutput(os.Stderr) - return trace.Wrap(err) - } - - hook, err := NewSyslogHook(w) - if err != nil { - logger.Errorf("Failed to switch logging to syslog: %v.", err) - logger.SetOutput(os.Stderr) - return trace.Wrap(err) - } - - logger.ReplaceHooks(make(logrus.LevelHooks)) - logger.AddHook(hook) - logger.SetOutput(io.Discard) - - return nil -} - -// NewSyslogHook provides a [logrus.Hook] that sends output to syslog. 
-func NewSyslogHook(w io.Writer) (logrus.Hook, error) { - if w == nil { - return nil, trace.BadParameter("syslog writer must not be nil") - } - - sw, ok := w.(*syslog.Writer) - if !ok { - return nil, trace.BadParameter("expected a syslog writer, got %T", w) - } - - return &logrusSyslog.SyslogHook{Writer: sw}, nil -} - -// NewSyslogWriter creates a writer that outputs to the local machine syslog. -func NewSyslogWriter() (io.Writer, error) { - writer, err := syslog.Dial("", "", syslog.LOG_WARNING, "") - return writer, trace.Wrap(err) -} diff --git a/tool/tbot/spiffe.go b/tool/tbot/spiffe.go index f319505de8caa..9ba58f3c94df0 100644 --- a/tool/tbot/spiffe.go +++ b/tool/tbot/spiffe.go @@ -21,22 +21,48 @@ package main import ( "context" "fmt" + "log/slog" "time" "github.com/gravitational/trace" "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" "github.com/spiffe/go-spiffe/v2/workloadapi" - - "github.com/gravitational/teleport/lib/utils" ) +// TODO(tross/noah): Remove once go-spiff has a slog<->workloadapi.Logger adapter. 
+// https://github.com/spiffe/go-spiffe/issues/281 +type logger struct { + l *slog.Logger +} + +func (l logger) Debugf(format string, args ...interface{}) { + //nolint:sloglint // msg cannot be constant + l.l.DebugContext(context.Background(), fmt.Sprintf(format, args...)) +} + +func (l logger) Infof(format string, args ...interface{}) { + //nolint:sloglint // msg cannot be constant + l.l.InfoContext(context.Background(), fmt.Sprintf(format, args...)) +} + +func (l logger) Warnf(format string, args ...interface{}) { + //nolint:sloglint // msg cannot be constant + l.l.WarnContext(context.Background(), fmt.Sprintf(format, args...)) +} + +func (l logger) Errorf(format string, args ...interface{}) { + //nolint:sloglint // msg cannot be constant + l.l.ErrorContext(context.Background(), fmt.Sprintf(format, args...)) +} + func onSPIFFEInspect(ctx context.Context, path string) error { log.InfoContext(ctx, "Inspecting SPIFFE Workload API Endpoint", "path", path) source, err := workloadapi.New( ctx, // TODO(noah): Upstream PR to add slog<->workloadapi.Logger adapter. 
- workloadapi.WithLogger(utils.NewLogger()), + // https://github.com/spiffe/go-spiffe/issues/281 + workloadapi.WithLogger(logger{l: slog.Default()}), workloadapi.WithAddr(path), ) if err != nil { diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go index 23749bc14c528..7ea28fc994402 100644 --- a/tool/tctl/common/resource_command.go +++ b/tool/tctl/common/resource_command.go @@ -507,7 +507,7 @@ func (rc *ResourceCommand) createRole(ctx context.Context, client *authclient.Cl return trace.Wrap(err) } - warnAboutKubernetesResources(rc.config.Log, role) + warnAboutKubernetesResources(ctx, rc.config.Logger, role) roleName := role.GetName() _, err = client.GetRole(ctx, roleName) @@ -536,8 +536,8 @@ func (rc *ResourceCommand) updateRole(ctx context.Context, client *authclient.Cl return trace.Wrap(err) } - warnAboutKubernetesResources(rc.config.Log, role) - warnAboutDynamicLabelsInDenyRule(rc.config.Log, role) + warnAboutKubernetesResources(ctx, rc.config.Logger, role) + warnAboutDynamicLabelsInDenyRule(ctx, rc.config.Logger, role) if _, err := client.UpdateRole(ctx, role); err != nil { return trace.Wrap(err) @@ -548,21 +548,21 @@ func (rc *ResourceCommand) updateRole(ctx context.Context, client *authclient.Cl // warnAboutKubernetesResources warns about kubernetes resources // if kubernetes_labels are set but kubernetes_resources are not. -func warnAboutKubernetesResources(logger utils.Logger, r types.Role) { +func warnAboutKubernetesResources(ctx context.Context, logger *slog.Logger, r types.Role) { role, ok := r.(*types.RoleV6) // only warn about kubernetes resources for v6 roles if !ok || role.Version != types.V6 { return } if len(role.Spec.Allow.KubernetesLabels) > 0 && len(role.Spec.Allow.KubernetesResources) == 0 { - logger.Warningf("role %q has allow.kubernetes_labels set but no allow.kubernetes_resources, this is probably a mistake. 
Teleport will restrict access to pods.", role.Metadata.Name) + logger.WarnContext(ctx, "role has allow.kubernetes_labels set but no allow.kubernetes_resources, this is probably a mistake - Teleport will restrict access to pods", "role", role.Metadata.Name) } if len(role.Spec.Allow.KubernetesLabels) == 0 && len(role.Spec.Allow.KubernetesResources) > 0 { - logger.Warningf("role %q has allow.kubernetes_resources set but no allow.kubernetes_labels, this is probably a mistake. kubernetes_resources won't be effective.", role.Metadata.Name) + logger.WarnContext(ctx, "role has allow.kubernetes_resources set but no allow.kubernetes_labels, this is probably a mistake - kubernetes_resources won't be effective", "role", role.Metadata.Name) } if len(role.Spec.Deny.KubernetesLabels) > 0 && len(role.Spec.Deny.KubernetesResources) > 0 { - logger.Warningf("role %q has deny.kubernetes_labels set but also has deny.kubernetes_resources set, this is probably a mistake. deny.kubernetes_resources won't be effective.", role.Metadata.Name) + logger.WarnContext(ctx, "role has deny.kubernetes_labels set but also has deny.kubernetes_resources set, this is probably a mistake - deny.kubernetes_resources won't be effective", "role", role.Metadata.Name) } } @@ -574,13 +574,13 @@ func dynamicLabelWarningMessage(r types.Role) string { // warnAboutDynamicLabelsInDenyRule warns about using dynamic/ labels in deny // rules. Only applies to existing roles as adding dynamic/ labels to deny // rules in a new role is not allowed. 
-func warnAboutDynamicLabelsInDenyRule(logger utils.Logger, r types.Role) { +func warnAboutDynamicLabelsInDenyRule(ctx context.Context, logger *slog.Logger, r types.Role) { if err := services.CheckDynamicLabelsInDenyRules(r); err == nil { return } else if trace.IsBadParameter(err) { - logger.Warningf(dynamicLabelWarningMessage(r)) + logger.WarnContext(ctx, "existing role has labels with the a dynamic prefix in its deny rules, this is not recommended due to the volatility of dynamic labels and is not allowed for new roles", "role", r.GetName()) } else { - logger.WithError(err).Warningf("error checking deny rules labels") + logger.WarnContext(ctx, "error checking deny rules labels", "error", err) } } @@ -2357,7 +2357,7 @@ func (rc *ResourceCommand) getCollection(ctx context.Context, client *authclient if err != nil { return nil, trace.Wrap(err) } - warnAboutDynamicLabelsInDenyRule(rc.config.Log, role) + warnAboutDynamicLabelsInDenyRule(ctx, rc.config.Logger, role) return &roleCollection{roles: []types.Role{role}}, nil case types.KindNamespace: if rc.ref.Name == "" { diff --git a/tool/teleport/common/teleport.go b/tool/teleport/common/teleport.go index 8318fcf68a45f..15e96fc949346 100644 --- a/tool/teleport/common/teleport.go +++ b/tool/teleport/common/teleport.go @@ -1014,10 +1014,22 @@ func dumpConfigFile(outputURI, contents, comment string) (string, error) { // user's privileges // // This is the entry point of "teleport scp" call (the parent process is the teleport daemon) -func onSCP(scpFlags *scp.Flags) (err error) { +func onSCP(scpFlags *scp.Flags) error { // when 'teleport scp' is executed, it cannot write logs to stderr (because // they're automatically replayed by the scp client) - utils.SwitchLoggingToSyslog() + var verbosity string + if scpFlags.Verbose { + verbosity = teleport.DebugLevel + } + _, _, err := logutils.Initialize(logutils.Config{ + Output: teleport.Syslog, + Severity: verbosity, + }) + if err != nil { + // If something went wrong, discard all 
logs and continue command execution. + slog.SetDefault(slog.New(logutils.DiscardHandler{})) + } + if len(scpFlags.Target) == 0 { return trace.BadParameter("teleport scp: missing an argument") } diff --git a/tool/teleport/testenv/test_server.go b/tool/teleport/testenv/test_server.go index 3e034d9fdf0d6..e4c9245c478ce 100644 --- a/tool/teleport/testenv/test_server.go +++ b/tool/teleport/testenv/test_server.go @@ -150,7 +150,7 @@ func MakeTestServer(t *testing.T, opts ...TestServerOptFunc) (process *service.T cfg.Hostname = "server01" cfg.DataDir = t.TempDir() - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() authAddr := utils.NetAddr{AddrNetwork: "tcp", Addr: NewTCPListener(t, service.ListenerAuth, &cfg.FileDescriptors)} cfg.SetToken(StaticToken) cfg.SetAuthServerAddress(authAddr) diff --git a/tool/tsh/common/tsh_helper_test.go b/tool/tsh/common/tsh_helper_test.go index 85ec86097b3bd..35250d6f54e4b 100644 --- a/tool/tsh/common/tsh_helper_test.go +++ b/tool/tsh/common/tsh_helper_test.go @@ -97,7 +97,7 @@ func (s *suite) setupRootCluster(t *testing.T, options testSuiteOptions) { cfg := servicecfg.MakeDefaultConfig() cfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() err := config.ApplyFileConfig(fileConfig, cfg) require.NoError(t, err) cfg.FileDescriptors = dynAddr.Descriptors @@ -194,7 +194,7 @@ func (s *suite) setupLeafCluster(t *testing.T, options testSuiteOptions) { cfg := servicecfg.MakeDefaultConfig() cfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() err := config.ApplyFileConfig(fileConfig, cfg) require.NoError(t, err) cfg.FileDescriptors = dynAddr.Descriptors diff --git a/tool/tsh/common/tsh_test.go b/tool/tsh/common/tsh_test.go index 2ffa313d42cb1..61dda435b127d 100644 --- a/tool/tsh/common/tsh_test.go +++ b/tool/tsh/common/tsh_test.go @@ -3856,7 +3856,7 
@@ func makeTestSSHNode(t *testing.T, authAddr *utils.NetAddr, opts ...testServerOp cfg.SSH.Addr = *utils.MustParseAddr("127.0.0.1:0") cfg.SSH.PublicAddrs = []utils.NetAddr{cfg.SSH.Addr} cfg.SSH.DisableCreateHostUser = true - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() // Disabling debug service for tests so that it doesn't break if the data // directory path is too long. cfg.DebugService.Enabled = false @@ -3905,7 +3905,7 @@ func makeTestServers(t *testing.T, opts ...testServerOptFunc) (auth *service.Tel cfg.Proxy.SSHAddr = utils.NetAddr{AddrNetwork: "tcp", Addr: net.JoinHostPort("127.0.0.1", ports.Pop())} cfg.Proxy.ReverseTunnelListenAddr = utils.NetAddr{AddrNetwork: "tcp", Addr: net.JoinHostPort("127.0.0.1", ports.Pop())} cfg.Proxy.DisableWebInterface = true - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() // Disabling debug service for tests so that it doesn't break if the data // directory path is too long. cfg.DebugService.Enabled = false From 5eee08d914ee51c47ba1f1c2e9f3172b7d37adf5 Mon Sep 17 00:00:00 2001 From: "STeve (Xin) Huang" Date: Thu, 9 Jan 2025 11:55:09 -0500 Subject: [PATCH 28/45] GitHub proxy part 6.5: tsh git ssh/clone/config (#50044) * GitHub proxy part 6.5: tsh git ssh/clone/config * review comments * fix test * fix ut for lookpath * fix logger and update dependency version * go mod tidy for integrations --- go.mod | 8 +- go.sum | 26 +++- integrations/event-handler/go.mod | 2 +- integrations/event-handler/go.sum | 4 +- integrations/terraform/go.mod | 2 +- integrations/terraform/go.sum | 12 +- tool/tsh/common/git.go | 112 +++++++++++++++++- tool/tsh/common/git_clone.go | 73 ++++++++++++ tool/tsh/common/git_clone_test.go | 115 ++++++++++++++++++ tool/tsh/common/git_config.go | 184 +++++++++++++++++++++++++++++ tool/tsh/common/git_config_test.go | 184 +++++++++++++++++++++++++++++ tool/tsh/common/git_ssh.go | 86 ++++++++++++++ tool/tsh/common/git_test.go | 79 +++++++++++++ 
tool/tsh/common/tsh.go | 22 ++++ 14 files changed, 888 insertions(+), 21 deletions(-) create mode 100644 tool/tsh/common/git_clone.go create mode 100644 tool/tsh/common/git_clone_test.go create mode 100644 tool/tsh/common/git_config.go create mode 100644 tool/tsh/common/git_config_test.go create mode 100644 tool/tsh/common/git_ssh.go create mode 100644 tool/tsh/common/git_test.go diff --git a/go.mod b/go.mod index 77012f1103bc7..3c35132910093 100644 --- a/go.mod +++ b/go.mod @@ -101,6 +101,7 @@ require ( github.com/fxamacker/cbor/v2 v2.7.0 github.com/ghodss/yaml v1.0.0 github.com/gizak/termui/v3 v3.1.0 + github.com/go-git/go-git/v5 v5.13.1 github.com/go-jose/go-jose/v3 v3.0.3 github.com/go-ldap/ldap/v3 v3.4.10 github.com/go-logr/logr v1.4.2 @@ -308,7 +309,7 @@ require ( github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect github.com/crewjam/httperr v0.2.0 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect - github.com/cyphar/filepath-securejoin v0.3.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/danieljoos/wincred v1.2.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/daviddengcn/go-colortext v1.0.0 // indirect @@ -341,6 +342,8 @@ require ( github.com/go-errors/errors v1.4.2 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.6.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-jose/go-jose/v4 v4.0.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -402,6 +405,7 @@ require ( github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect 
github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect @@ -469,6 +473,7 @@ require ( github.com/pingcap/errors v0.11.5-0.20240311024730-e056997136bb // indirect github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22 // indirect github.com/pingcap/tidb/pkg/parser v0.0.0-20240930120915-74034d4ac243 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/xattr v0.4.10 // indirect @@ -548,6 +553,7 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect k8s.io/component-helpers v0.31.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/metrics v0.31.3 // indirect diff --git a/go.sum b/go.sum index c270f81730944..5665c4f7280c7 100644 --- a/go.sum +++ b/go.sum @@ -778,8 +778,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= -github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE= -github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= 
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= @@ -1104,8 +1104,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= -github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1247,6 +1247,14 @@ github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3 github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.6.1 
h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= +github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= +github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -1689,6 +1697,8 @@ github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -1973,6 +1983,8 @@ github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22 h1:2SOzvGvE8beiC1Y4g github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22/go.mod 
h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/tidb/pkg/parser v0.0.0-20240930120915-74034d4ac243 h1:B3pF5adXRpuEDfSKY/bV2Lw+pPKtWH4FOaAX3Jx3X54= github.com/pingcap/tidb/pkg/parser v0.0.0-20240930120915-74034d4ac243/go.mod h1:dXcO3Ts6jUVE1VwBZp3wbVdGO4pi9MXY6IvL4L1z62g= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -2081,8 +2093,8 @@ github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= @@ -3148,6 +3160,8 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce 
h1:+JknDZhAj8YMt7 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod index 922ce6e72ae3c..19d919b359e39 100644 --- a/integrations/event-handler/go.mod +++ b/integrations/event-handler/go.mod @@ -115,7 +115,7 @@ require ( github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect github.com/crewjam/httperr v0.2.0 // indirect github.com/crewjam/saml v0.4.14 // indirect - github.com/cyphar/filepath-securejoin v0.3.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/di-wu/parser v0.3.0 // indirect github.com/di-wu/xsd-datetime v1.0.0 // indirect diff --git a/integrations/event-handler/go.sum b/integrations/event-handler/go.sum index e88669e71da35..1f0435df0d184 100644 --- a/integrations/event-handler/go.sum +++ b/integrations/event-handler/go.sum @@ -883,8 +883,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4= github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c= github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME= 
-github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= -github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index 50246feeb9ed6..d3240ffff8135 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -131,7 +131,7 @@ require ( github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect github.com/crewjam/httperr v0.2.0 // indirect github.com/crewjam/saml v0.4.14 // indirect - github.com/cyphar/filepath-securejoin v0.3.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/di-wu/parser v0.3.0 // indirect github.com/di-wu/xsd-datetime v1.0.0 // indirect diff --git a/integrations/terraform/go.sum b/integrations/terraform/go.sum index 1a6cf422dd62e..106e4e41c759b 100644 --- a/integrations/terraform/go.sum +++ b/integrations/terraform/go.sum @@ -971,8 +971,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4= github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c= github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME= -github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= 
-github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -1088,12 +1088,12 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= +github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= -github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= +github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= diff --git a/tool/tsh/common/git.go b/tool/tsh/common/git.go index 3f43578fb4132..990ddb8f22fc7 100644 --- a/tool/tsh/common/git.go +++ b/tool/tsh/common/git.go @@ -19,24 +19,128 @@ package common import ( + "bytes" + "io" + "os/exec" + "strings" + "github.com/alecthomas/kingpin/v2" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/gravitational/trace" + + "github.com/gravitational/teleport/api/types" ) type gitCommands struct { - list *gitListCommand - login *gitLoginCommand + list *gitListCommand + login *gitLoginCommand + ssh *gitSSHCommand + config *gitConfigCommand + clone *gitCloneCommand } func newGitCommands(app *kingpin.Application) gitCommands { git := app.Command("git", "Git server commands.") cmds := gitCommands{ - login: newGitLoginCommand(git), - list: newGitListCommand(git), + login: newGitLoginCommand(git), + list: newGitListCommand(git), + ssh: newGitSSHCommand(git), + config: newGitConfigCommand(git), + clone: newGitCloneCommand(git), } // TODO(greedy52) hide the commands until all basic features are implemented. git.Hidden() cmds.login.Hidden() cmds.list.Hidden() + cmds.config.Hidden() + cmds.clone.Hidden() return cmds } + +type gitSSHURL transport.Endpoint + +func (g gitSSHURL) check() error { + switch { + case g.isGitHub(): + if err := types.ValidateGitHubOrganizationName(g.owner()); err != nil { + return trace.Wrap(err) + } + } + return nil +} + +func (g gitSSHURL) isGitHub() bool { + return g.Host == "github.com" +} + +// owner returns the first part of the path. If the path does not have an owner, +// an empty string is returned. +// +// For GitHub, owner is either the user or the organization that owns the repo. 
+// +// For example, if the SSH url is git@github.com:gravitational/teleport.git, the +// owner would be "gravitational". +func (g gitSSHURL) owner() string { + // g.Path may have a preceding "/" from url.Parse. + owner, _, ok := strings.Cut(strings.TrimPrefix(g.Path, "/"), "/") + if !ok { + return "" + } + return owner +} + +// parseGitSSHURL parse a Git SSH URL. +// +// Git URL Spec: +// - spec: https://git-scm.com/docs/git-clone#_git_urls +// - example: ssh://example.org/path/to/repo.git +// +// GitHub (SCP-like) URL: +// - spec: https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories +// - example: git@github.com:gravitational/teleport.git +func parseGitSSHURL(originalURL string) (*gitSSHURL, error) { + endpoint, err := transport.NewEndpoint(originalURL) + if err != nil { + return nil, trace.Wrap(err) + } + if endpoint.Protocol != "ssh" { + return nil, trace.BadParameter("unsupported git ssh URL %s", originalURL) + } + s := gitSSHURL(*endpoint) + if err := s.check(); err != nil { + return nil, trace.Wrap(err) + } + return &s, nil +} + +func execGitAndCaptureStdout(cf *CLIConf, args ...string) (string, error) { + var bufStd bytes.Buffer + if err := execGitWithStdoutAndStderr(cf, &bufStd, cf.Stderr(), args...); err != nil { + return "", trace.Wrap(err) + } + return strings.TrimSpace(bufStd.String()), nil +} + +func execGit(cf *CLIConf, args ...string) error { + return trace.Wrap(execGitWithStdoutAndStderr(cf, cf.Stdout(), cf.Stderr(), args...)) +} + +func execGitWithStdoutAndStderr(cf *CLIConf, stdout, stderr io.Writer, args ...string) error { + const gitExecutable = "git" + gitPath, err := cf.LookPath(gitExecutable) + if err != nil { + return trace.NotFound(`could not locate the executable %q. The following error occurred: +%s + +tsh requires that the %q executable to be installed. 
+You can install it by following the instructions at https://git-scm.com/book/en/v2/Getting-Started-Installing-Git`, + gitExecutable, err.Error(), gitExecutable) + } + logger.DebugContext(cf.Context, "Executing git command", "path", gitPath, "args", args) + cmd := exec.CommandContext(cf.Context, gitPath, args...) + cmd.Stdin = cf.Stdin() + cmd.Stdout = stdout + cmd.Stderr = stderr + return trace.Wrap(cf.RunCommand(cmd)) +} diff --git a/tool/tsh/common/git_clone.go b/tool/tsh/common/git_clone.go new file mode 100644 index 0000000000000..93d00d4134434 --- /dev/null +++ b/tool/tsh/common/git_clone.go @@ -0,0 +1,73 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package common + +import ( + "fmt" + + "github.com/alecthomas/kingpin/v2" + "github.com/gravitational/trace" +) + +// gitCloneCommand implements `tsh git clone`. +// +// This command internally executes `git clone` while setting `core.sshcommand`. +// You can generally assume the user has `git` binary installed (otherwise there +// is no point using the `git` proxy feature). +// +// TODO(greedy52) investigate using `go-git` library instead of calling `git +// clone`. 
+type gitCloneCommand struct { + *kingpin.CmdClause + + repository string + directory string +} + +func newGitCloneCommand(parent *kingpin.CmdClause) *gitCloneCommand { + cmd := &gitCloneCommand{ + CmdClause: parent.Command("clone", "Clone a Git repository."), + } + + cmd.Arg("repository", "Git URL of the repository to clone.").Required().StringVar(&cmd.repository) + cmd.Arg("directory", "The name of a new directory to clone into.").StringVar(&cmd.directory) + // TODO(greedy52) support passing extra args to git like --branch/--depth. + return cmd +} + +func (c *gitCloneCommand) run(cf *CLIConf) error { + u, err := parseGitSSHURL(c.repository) + if err != nil { + return trace.Wrap(err) + } + if !u.isGitHub() { + return trace.BadParameter("%s is not a GitHub repository", c.repository) + } + + sshCommand := makeGitCoreSSHCommand(cf.executablePath, u.owner()) + args := []string{ + "clone", + "--config", fmt.Sprintf("%s=%s", gitCoreSSHCommand, sshCommand), + c.repository, + } + if c.directory != "" { + args = append(args, c.directory) + } + return trace.Wrap(execGit(cf, args...)) +} diff --git a/tool/tsh/common/git_clone_test.go b/tool/tsh/common/git_clone_test.go new file mode 100644 index 0000000000000..4e27e3ac3286f --- /dev/null +++ b/tool/tsh/common/git_clone_test.go @@ -0,0 +1,115 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package common + +import ( + "context" + "os/exec" + "slices" + "testing" + + "github.com/gravitational/trace" + "github.com/stretchr/testify/require" +) + +func TestGitCloneCommand(t *testing.T) { + tests := []struct { + name string + cmd *gitCloneCommand + verifyCommand func(*exec.Cmd) error + checkError require.ErrorAssertionFunc + }{ + { + name: "success", + cmd: &gitCloneCommand{ + repository: "git@github.com:gravitational/teleport.git", + }, + verifyCommand: func(cmd *exec.Cmd) error { + expect := []string{ + "git", "clone", + "--config", "core.sshcommand=\"tsh\" git ssh --github-org gravitational", + "git@github.com:gravitational/teleport.git", + } + if !slices.Equal(expect, cmd.Args) { + return trace.CompareFailed("expect %v but got %v", expect, cmd.Args) + } + return nil + }, + checkError: require.NoError, + }, + { + name: "success with target dir", + cmd: &gitCloneCommand{ + repository: "git@github.com:gravitational/teleport.git", + directory: "target_dir", + }, + verifyCommand: func(cmd *exec.Cmd) error { + expect := []string{ + "git", "clone", + "--config", "core.sshcommand=\"tsh\" git ssh --github-org gravitational", + "git@github.com:gravitational/teleport.git", + "target_dir", + } + if !slices.Equal(expect, cmd.Args) { + return trace.CompareFailed("expect %v but got %v", expect, cmd.Args) + } + return nil + }, + checkError: require.NoError, + }, + { + name: "invalid URL", + cmd: &gitCloneCommand{ + repository: "not-a-git-ssh-url", + }, + checkError: require.Error, + }, + { + name: "unsupported Git service", + cmd: &gitCloneCommand{ + repository: "git@gitlab.com:group/project.git", + }, + checkError: require.Error, + }, + { + name: "git fails", + cmd: &gitCloneCommand{ + repository: "git@github.com:gravitational/teleport.git", + }, + verifyCommand: func(cmd *exec.Cmd) error { + return trace.BadParameter("some git error") 
+ }, + checkError: func(t require.TestingT, err error, i ...interface{}) { + require.ErrorIs(t, err, trace.BadParameter("some git error")) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cf := &CLIConf{ + Context: context.Background(), + executablePath: "tsh", + cmdRunner: tt.verifyCommand, + lookPathOverride: "git", + } + tt.checkError(t, tt.cmd.run(cf)) + }) + } +} diff --git a/tool/tsh/common/git_config.go b/tool/tsh/common/git_config.go new file mode 100644 index 0000000000000..89771735b30b3 --- /dev/null +++ b/tool/tsh/common/git_config.go @@ -0,0 +1,184 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package common + +import ( + "fmt" + "io" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/gravitational/trace" +) + +// gitConfigCommand implements `tsh git config`. +// +// This command internally executes `git` commands like `git config xxx`. +// can generally assume the user has `git` binary installed (otherwise there is +// no point using the `git` proxy feature). +// +// TODO(greedy52) investigate using `go-git` library instead of calling `git +// config`. 
+type gitConfigCommand struct { + *kingpin.CmdClause + + action string +} + +const ( + gitConfigActionDefault = "" + gitConfigActionUpdate = "update" + gitConfigActionReset = "reset" + + // gitCoreSSHCommand is the Git config used for setting up alternative SSH + // command. For Git-proxying, the command should point to "tsh git ssh". + // + // https://git-scm.com/docs/git-config#Documentation/git-config.txt-coresshCommand + gitCoreSSHCommand = "core.sshcommand" +) + +func newGitConfigCommand(parent *kingpin.CmdClause) *gitConfigCommand { + cmd := &gitConfigCommand{ + CmdClause: parent.Command("config", "Check Teleport config on the working Git directory. Or provide an action ('update' or 'reset') to configure the Git repo."), + } + + cmd.Arg("action", "Optional action to perform. 'update' to configure the Git repo to proxy Git commands through Teleport. 'reset' to clear Teleport configuration from the Git repo."). + EnumVar(&cmd.action, gitConfigActionUpdate, gitConfigActionReset) + return cmd +} + +func (c *gitConfigCommand) run(cf *CLIConf) error { + // Make sure we are in a Git dir. + err := execGitWithStdoutAndStderr(cf, io.Discard, io.Discard, "rev-parse", "--is-inside-work-tree") + if err != nil { + // In case git is not found, return the look path error. + if trace.IsNotFound(err) { + return trace.Wrap(err) + } + // This error message is a slight alternation of the original error + // message from the above command. 
+ return trace.BadParameter("the current directory is not a Git repository (or any of the parent directories)") + } + + switch c.action { + case gitConfigActionDefault: + return trace.Wrap(c.doCheck(cf)) + case gitConfigActionUpdate: + return trace.Wrap(c.doUpdate(cf)) + case gitConfigActionReset: + return trace.Wrap(c.doReset(cf)) + default: + return trace.BadParameter("unknown action '%v'", c.action) + } +} + +func (c *gitConfigCommand) doCheck(cf *CLIConf) error { + sshCommand, err := c.getCoreSSHCommand(cf) + if err != nil { + return trace.Wrap(err) + } + wantPrefix := makeGitCoreSSHCommand(cf.executablePath, "") + if strings.HasPrefix(sshCommand, wantPrefix) { + _, org, _ := strings.Cut(sshCommand, wantPrefix) + fmt.Fprintf(cf.Stdout(), "The current Git directory is configured with Teleport for GitHub organization %q.\n", org) + return nil + } + + c.printDirNotConfigured(cf.Stdout(), true, sshCommand) + return nil +} + +func (c *gitConfigCommand) printDirNotConfigured(w io.Writer, withUpdate bool, existingSSHCommand string) { + fmt.Fprintln(w, "The current Git directory is not configured with Teleport.") + if withUpdate { + if existingSSHCommand != "" { + fmt.Fprintf(w, "%q currently has value %q.\n", gitCoreSSHCommand, existingSSHCommand) + fmt.Fprintf(w, "Run 'tsh git config update' to configure Git directory with Teleport but %q will be overwritten.\n", gitCoreSSHCommand) + } else { + fmt.Fprintln(w, "Run 'tsh git config update' to configure it.") + } + } +} + +func (c *gitConfigCommand) doUpdate(cf *CLIConf) error { + urls, err := execGitAndCaptureStdout(cf, "ls-remote", "--get-url") + if err != nil { + return trace.Wrap(err) + } + for _, url := range strings.Split(urls, "\n") { + u, err := parseGitSSHURL(url) + if err != nil { + logger.DebugContext(cf.Context, "Skippig URL", "error", err, "url", url) + continue + } + if !u.isGitHub() { + logger.DebugContext(cf.Context, "Skippig non-GitHub host", "host", u.Host) + continue + } + + 
logger.DebugContext(cf.Context, "Configuring repo to use tsh.", "url", url, "owner", u.owner()) + args := []string{ + "config", "--local", + "--replace-all", gitCoreSSHCommand, + makeGitCoreSSHCommand(cf.executablePath, u.owner()), + } + if err := execGit(cf, args...); err != nil { + return trace.Wrap(err) + } + fmt.Fprintln(cf.Stdout(), "Teleport configuration added.") + return trace.Wrap(c.doCheck(cf)) + } + return trace.NotFound("no GitHub SSH URL found from 'git ls-remote --get-url'") +} + +func (c *gitConfigCommand) doReset(cf *CLIConf) error { + sshCommand, err := c.getCoreSSHCommand(cf) + if err != nil { + return trace.Wrap(err) + } + wantPrefix := makeGitCoreSSHCommand(cf.executablePath, "") + if !strings.HasPrefix(sshCommand, wantPrefix) { + c.printDirNotConfigured(cf.Stdout(), false, sshCommand) + return nil + } + + if err := execGit(cf, "config", "--local", "--unset-all", gitCoreSSHCommand); err != nil { + return trace.Wrap(err) + } + fmt.Fprintln(cf.Stdout(), "Teleport configuration removed.") + return nil +} + +func (c *gitConfigCommand) getCoreSSHCommand(cf *CLIConf) (string, error) { + return execGitAndCaptureStdout(cf, + "config", "--local", + // set default to empty to avoid non-zero exit when config is missing + "--default", "", + "--get", gitCoreSSHCommand, + ) +} + +// makeGitCoreSSHCommand generates the value for Git config "core.sshcommand". +func makeGitCoreSSHCommand(tshBin, githubOrg string) string { + // Quote the path in case it has spaces + return fmt.Sprintf("\"%s\" git ssh --github-org %s", + tshBin, + githubOrg, + ) +} diff --git a/tool/tsh/common/git_config_test.go b/tool/tsh/common/git_config_test.go new file mode 100644 index 0000000000000..b045e9342bb5f --- /dev/null +++ b/tool/tsh/common/git_config_test.go @@ -0,0 +1,184 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package common + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "slices" + "testing" + + "github.com/gravitational/trace" + "github.com/stretchr/testify/require" +) + +func isGitDirCheck(cmd *exec.Cmd) bool { + return slices.Equal([]string{"git", "rev-parse", "--is-inside-work-tree"}, cmd.Args) +} +func isGitListRemoteURL(cmd *exec.Cmd) bool { + return slices.Equal([]string{"git", "ls-remote", "--get-url"}, cmd.Args) +} +func isGitConfigGetCoreSSHCommand(cmd *exec.Cmd) bool { + return slices.Equal([]string{"git", "config", "--local", "--default", "", "--get", "core.sshcommand"}, cmd.Args) +} + +type fakeGitCommandRunner struct { + dirCheckError error + coreSSHCommand string + remoteURL string + verifyCommand func(cmd *exec.Cmd) error +} + +func (f fakeGitCommandRunner) run(cmd *exec.Cmd) error { + switch { + case isGitDirCheck(cmd): + return f.dirCheckError + case isGitConfigGetCoreSSHCommand(cmd): + fmt.Fprintln(cmd.Stdout, f.coreSSHCommand) + return nil + case isGitListRemoteURL(cmd): + fmt.Fprintln(cmd.Stdout, f.remoteURL) + return nil + default: + if f.verifyCommand != nil { + return trace.Wrap(f.verifyCommand(cmd)) + } + return trace.NotFound("unknown command") + } +} + +func TestGitConfigCommand(t *testing.T) { + tests := []struct { + name string + cmd *gitConfigCommand + fakeRunner fakeGitCommandRunner + 
checkError require.ErrorAssertionFunc + checkOutputContains string + }{ + { + name: "not a git dir", + cmd: &gitConfigCommand{}, + fakeRunner: fakeGitCommandRunner{ + dirCheckError: trace.BadParameter("not a git dir"), + }, + checkError: func(t require.TestingT, err error, i ...interface{}) { + require.Error(t, err) + require.Contains(t, err.Error(), "the current directory is not a Git repository") + }, + }, + { + name: "check", + cmd: &gitConfigCommand{}, + fakeRunner: fakeGitCommandRunner{ + coreSSHCommand: makeGitCoreSSHCommand("tsh", "org"), + }, + checkError: require.NoError, + checkOutputContains: "is configured with Teleport for GitHub organization \"org\"", + }, + { + name: "check not configured", + cmd: &gitConfigCommand{}, + fakeRunner: fakeGitCommandRunner{ + coreSSHCommand: "", + }, + checkError: require.NoError, + checkOutputContains: "is not configured", + }, + { + name: "update success", + cmd: &gitConfigCommand{ + action: gitConfigActionUpdate, + }, + fakeRunner: fakeGitCommandRunner{ + coreSSHCommand: makeGitCoreSSHCommand("tsh", "org"), + remoteURL: "git@github.com:gravitational/teleport.git", + verifyCommand: func(cmd *exec.Cmd) error { + expect := []string{ + "git", "config", "--local", + "--replace-all", "core.sshcommand", + "\"tsh\" git ssh --github-org gravitational", + } + if !slices.Equal(expect, cmd.Args) { + return trace.CompareFailed("expect %v but got %v", expect, cmd.Args) + } + return nil + }, + }, + checkError: require.NoError, + }, + { + name: "update failed missing url", + cmd: &gitConfigCommand{ + action: gitConfigActionUpdate, + }, + fakeRunner: fakeGitCommandRunner{ + coreSSHCommand: makeGitCoreSSHCommand("tsh", "org"), + remoteURL: "", + }, + checkError: require.Error, + }, + { + name: "reset no-op", + cmd: &gitConfigCommand{ + action: gitConfigActionReset, + }, + fakeRunner: fakeGitCommandRunner{ + coreSSHCommand: "", + }, + checkError: require.NoError, + }, + { + name: "reset no-op", + cmd: &gitConfigCommand{ + action: 
gitConfigActionReset, + }, + fakeRunner: fakeGitCommandRunner{ + coreSSHCommand: makeGitCoreSSHCommand("tsh", "org"), + verifyCommand: func(cmd *exec.Cmd) error { + expect := []string{ + "git", "config", "--local", + "--unset-all", "core.sshcommand", + } + if !slices.Equal(expect, cmd.Args) { + return trace.CompareFailed("expect %v but got %v", expect, cmd.Args) + } + return nil + }, + }, + checkError: require.NoError, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + cf := &CLIConf{ + Context: context.Background(), + OverrideStdout: &buf, + executablePath: "tsh", + cmdRunner: tt.fakeRunner.run, + lookPathOverride: "git", + } + tt.checkError(t, tt.cmd.run(cf)) + require.Contains(t, buf.String(), tt.checkOutputContains) + }) + } +} diff --git a/tool/tsh/common/git_ssh.go b/tool/tsh/common/git_ssh.go new file mode 100644 index 0000000000000..d4221d0f2f286 --- /dev/null +++ b/tool/tsh/common/git_ssh.go @@ -0,0 +1,86 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package common + +import ( + "fmt" + "os" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/gravitational/trace" + + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/client" +) + +// gitSSHCommand implements `tsh git ssh`. 
+// +// Note that this is a hidden command as it is only meant for 'git` to call. +// TODO(greedy52) support Git protocol v2. +type gitSSHCommand struct { + *kingpin.CmdClause + + gitHubOrg string + userHost string + command []string + options []string +} + +func newGitSSHCommand(parent *kingpin.CmdClause) *gitSSHCommand { + cmd := &gitSSHCommand{ + CmdClause: parent.Command("ssh", "Proxy Git commands using SSH").Hidden(), + } + + cmd.Flag("github-org", "GitHub organization.").Required().StringVar(&cmd.gitHubOrg) + cmd.Arg("[user@]host", "Remote hostname and the login to use").Required().StringVar(&cmd.userHost) + cmd.Arg("command", "Command to execute on a remote host").StringsVar(&cmd.command) + cmd.Flag("option", "OpenSSH options in the format used in the configuration file").Short('o').AllowDuplicate().StringsVar(&cmd.options) + return cmd +} + +func (c *gitSSHCommand) run(cf *CLIConf) error { + _, host, ok := strings.Cut(c.userHost, "@") + if !ok || host != "github.com" { + return trace.BadParameter("user-host %q is not GitHub", c.userHost) + } + + // TODO(greedy52) when git calls tsh, tsh cannot prompt for password (e.g. + // user session expired) using provided stdin pipe. `tc.Login` should try + // hijacking "/dev/tty" and replace `prompt.Stdin` temporarily. 
+ identity, err := getGitHubIdentity(cf, c.gitHubOrg) + if err != nil { + return trace.Wrap(err) + } + logger.DebugContext(cf.Context, "Proxying git command for GitHub user.", "command", c.command, "user", identity.Username) + + cf.RemoteCommand = c.command + cf.Options = c.options + cf.UserHost = fmt.Sprintf("git@%s", types.MakeGitHubOrgServerDomain(c.gitHubOrg)) + + tc, err := makeClient(cf) + if err != nil { + return trace.Wrap(err) + } + tc.Stdin = os.Stdin + err = client.RetryWithRelogin(cf.Context, tc, func() error { + return tc.SSH(cf.Context, cf.RemoteCommand) + }) + return trace.Wrap(convertSSHExitCode(tc, err)) +} diff --git a/tool/tsh/common/git_test.go b/tool/tsh/common/git_test.go new file mode 100644 index 0000000000000..501004abd141d --- /dev/null +++ b/tool/tsh/common/git_test.go @@ -0,0 +1,79 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package common + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_parseGitSSHURL(t *testing.T) { + tests := []struct { + name string + input string + wantError bool + wantOut *gitSSHURL + }{ + { + name: "github ssh format", + input: "org-1234567@github.com:some-org/some-repo.git", + wantOut: &gitSSHURL{ + Protocol: "ssh", + Host: "github.com", + User: "org-1234567", + Path: "some-org/some-repo.git", + Port: 22, + }, + }, + { + name: "github ssh format invalid path", + input: "org-1234567@github.com:missing-org", + wantError: true, + }, + { + name: "ssh schema format", + input: "ssh://git@github.com/some-org/some-repo.git", + wantOut: &gitSSHURL{ + Protocol: "ssh", + Host: "github.com", + User: "git", + Path: "/some-org/some-repo.git", + }, + }, + { + name: "unsupported format", + input: "https://github.com/gravitational/teleport.git", + wantError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + out, err := parseGitSSHURL(tt.input) + t.Log(out, err) + if tt.wantError { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tt.wantOut, out) + }) + } +} diff --git a/tool/tsh/common/tsh.go b/tool/tsh/common/tsh.go index d65af0b7247db..7677a6a842251 100644 --- a/tool/tsh/common/tsh.go +++ b/tool/tsh/common/tsh.go @@ -575,6 +575,9 @@ type CLIConf struct { // profileStatusOverride overrides return of ProfileStatus(). used in tests. profileStatusOverride *client.ProfileStatus + + // lookPathOverride overrides return of LookPath(). used in tests. + lookPathOverride string } // Stdout returns the stdout writer. @@ -614,6 +617,14 @@ func (c *CLIConf) RunCommand(cmd *exec.Cmd) error { return trace.Wrap(cmd.Run()) } +// LookPath searches for an executable named file. 
+func (c *CLIConf) LookPath(file string) (string, error) { + if c.lookPathOverride != "" { + return c.lookPathOverride, nil + } + return exec.LookPath(file) +} + func Main() { cmdLineOrig := os.Args[1:] var cmdLine []string @@ -1637,6 +1648,12 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error { err = gitCmd.list.run(&cf) case gitCmd.login.FullCommand(): err = gitCmd.login.run(&cf) + case gitCmd.ssh.FullCommand(): + err = gitCmd.ssh.run(&cf) + case gitCmd.config.FullCommand(): + err = gitCmd.config.run(&cf) + case gitCmd.clone.FullCommand(): + err = gitCmd.clone.run(&cf) default: // Handle commands that might not be available. switch { @@ -3969,7 +3986,12 @@ func onSSH(cf *CLIConf) error { accessRequestForSSH, fmt.Sprintf("%s@%s", tc.HostLogin, tc.Host), ) + // Exit with the same exit status as the failed command. + return trace.Wrap(convertSSHExitCode(tc, err)) +} + +func convertSSHExitCode(tc *client.TeleportClient, err error) error { if tc.ExitStatus != 0 { var exitErr *common.ExitCodeError if errors.As(err, &exitErr) { From 802c0987cf476cef126e0973dd3f59a1f78abb61 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 9 Jan 2025 12:47:49 -0500 Subject: [PATCH 29/45] Fix flaky host user tests (#50911) Applies the same fixes as #49850 to ensure that the tests wait until the target host is routable before attempting to create any SSH sessions. 
Closes https://github.com/gravitational/teleport/issues/50910 --- integration/hostuser_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/integration/hostuser_test.go b/integration/hostuser_test.go index f917a95f872a5..02145bc38274e 100644 --- a/integration/hostuser_test.go +++ b/integration/hostuser_test.go @@ -667,6 +667,8 @@ func TestRootLoginAsHostUser(t *testing.T) { require.NoError(t, instance.StopAll()) }) + instance.WaitForNodeCount(context.Background(), helpers.Site, 1) + tests := []struct { name string command []string @@ -750,6 +752,8 @@ func TestRootStaticHostUsers(t *testing.T) { _, err = instance.StartNode(nodeCfg) require.NoError(t, err) + instance.WaitForNodeCount(context.Background(), helpers.Site, 2) + // Create host user resources. groups := []string{"foo", "bar"} goodLogin := utils.GenerateLocalUsername(t) From 9c4b162eb06d7cbc5df966aa99b33d3155fcd3fa Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Thu, 9 Jan 2025 18:15:19 +0000 Subject: [PATCH 30/45] Deflake TestAWSOIDCRequiredVPCSHelper_CombinedSubnetsForAVpcID (#50916) Instead of creating an entire cluster, just mock the client to return 0 database services (we were not creating any in the cluster anyway) --- lib/web/integrations_awsoidc.go | 2 +- lib/web/integrations_awsoidc_test.go | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/web/integrations_awsoidc.go b/lib/web/integrations_awsoidc.go index 844391b1523f9..100bc59a1ce93 100644 --- a/lib/web/integrations_awsoidc.go +++ b/lib/web/integrations_awsoidc.go @@ -1055,7 +1055,7 @@ func awsOIDCListAllDatabases(ctx context.Context, clt authclient.ClientI, integr return fetchedRDSs, nil } -func awsOIDCRequiredVPCSHelper(ctx context.Context, clt authclient.ClientI, req ui.AWSOIDCRequiredVPCSRequest, fetchedRDSs []*types.DatabaseV3) (*ui.AWSOIDCRequiredVPCSResponse, error) { +func awsOIDCRequiredVPCSHelper(ctx context.Context, clt client.GetResourcesClient, req ui.AWSOIDCRequiredVPCSRequest, fetchedRDSs 
[]*types.DatabaseV3) (*ui.AWSOIDCRequiredVPCSResponse, error) { // Get all database services with ecs/fargate metadata label. fetchedDbSvcs, err := fetchAWSOIDCDatabaseServices(ctx, clt) if err != nil { diff --git a/lib/web/integrations_awsoidc_test.go b/lib/web/integrations_awsoidc_test.go index b8414570999dc..583c637866880 100644 --- a/lib/web/integrations_awsoidc_test.go +++ b/lib/web/integrations_awsoidc_test.go @@ -973,8 +973,6 @@ func TestAWSOIDCRequiredVPCSHelper(t *testing.T) { func TestAWSOIDCRequiredVPCSHelper_CombinedSubnetsForAVpcID(t *testing.T) { t.Parallel() ctx := context.Background() - env := newWebPack(t, 1) - clt := env.proxies[0].client rdsVPC1 := mustCreateRDS(t, types.RDS{ VPCID: "vpc-1", @@ -993,13 +991,20 @@ func TestAWSOIDCRequiredVPCSHelper_CombinedSubnetsForAVpcID(t *testing.T) { rdss := []*types.DatabaseV3{rdsVPC1, rdsVPC1a, rdsVPC2} - resp, err := awsOIDCRequiredVPCSHelper(ctx, clt, ui.AWSOIDCRequiredVPCSRequest{Region: "us-east-1"}, rdss) + resp, err := awsOIDCRequiredVPCSHelper(ctx, &mockGetResources{}, ui.AWSOIDCRequiredVPCSRequest{Region: "us-east-1"}, rdss) require.NoError(t, err) require.Len(t, resp.VPCMapOfSubnets, 2) require.ElementsMatch(t, []string{"subnet1", "subnet2", "subnet3", "subnet4"}, resp.VPCMapOfSubnets["vpc-1"]) require.ElementsMatch(t, []string{"subnet8"}, resp.VPCMapOfSubnets["vpc-2"]) } +type mockGetResources struct { +} + +func (m *mockGetResources) GetResources(ctx context.Context, req *proto.ListResourcesRequest) (*proto.ListResourcesResponse, error) { + return &proto.ListResourcesResponse{}, nil +} + func mustCreateRDS(t *testing.T, awsRDS types.RDS) *types.DatabaseV3 { rdsDB, err := types.NewDatabaseV3(types.Metadata{ Name: "x", From d5409cb481962b0a5b0f368fb1704a1ca8dbd9f2 Mon Sep 17 00:00:00 2001 From: Lisa Kim Date: Thu, 9 Jan 2025 11:58:52 -0800 Subject: [PATCH 31/45] Create v2 web api endpoints and required related changes (#50472) --- constants.go | 3 - lib/auth/trustedcluster.go | 4 +- 
lib/client/https_client.go | 5 +- lib/client/weblogin_test.go | 2 +- lib/httplib/httplib.go | 47 +++++++ lib/web/apiserver.go | 49 ++++++-- lib/web/apiserver_test.go | 116 +++++++++++++++++- lib/web/apiserver_test_utils.go | 6 +- lib/web/integrations_awsoidc.go | 1 + lib/web/join_tokens.go | 7 +- lib/web/join_tokens_test.go | 66 ++++++++++ web/packages/build/vite/config.ts | 16 +-- .../ManualDeploy/ManualDeploy.story.tsx | 10 +- .../EnrollEKSCluster/Dialogs.story.tsx | 2 +- .../EnrollEksCluster.story.tsx | 2 +- .../Kubernetes/HelmChart/HelmChart.story.tsx | 16 ++- .../DiscoveryConfigSsm.story.tsx | 8 +- .../DownloadScript/DownloadScript.story.tsx | 10 +- .../Enroll/AwsOidc/AwsOidc.test.tsx | 7 +- .../Authenticated/Authenticated.test.tsx | 9 +- web/packages/teleport/src/config.ts | 31 +++-- .../teleport/src/services/agents/make.ts | 2 +- web/packages/teleport/src/services/api/api.ts | 12 +- .../teleport/src/services/api/parseError.ts | 60 +++++++-- .../integrations/integrations.test.ts | 48 ++++++++ .../src/services/integrations/integrations.ts | 26 +++- .../src/services/integrations/types.ts | 6 + .../src/services/joinToken/joinToken.test.ts | 28 ++++- .../src/services/joinToken/joinToken.ts | 52 +++++--- .../teleport/src/services/joinToken/types.ts | 9 ++ .../src/services/version/unsupported.ts | 33 +++++ 31 files changed, 597 insertions(+), 96 deletions(-) create mode 100644 web/packages/teleport/src/services/version/unsupported.ts diff --git a/constants.go b/constants.go index ff9356f2b63d6..79f97ae24bfaf 100644 --- a/constants.go +++ b/constants.go @@ -25,9 +25,6 @@ import ( "github.com/gravitational/trace" ) -// WebAPIVersion is a current webapi version -const WebAPIVersion = "v1" - const ( // SSHAuthSock is the environment variable pointing to the // Unix socket the SSH agent is running on. 
diff --git a/lib/auth/trustedcluster.go b/lib/auth/trustedcluster.go index 443cb6579013e..a02e8f4b74de6 100644 --- a/lib/auth/trustedcluster.go +++ b/lib/auth/trustedcluster.go @@ -679,7 +679,9 @@ func (a *Server) sendValidateRequestToProxy(ctx context.Context, host string, va opts = append(opts, roundtrip.HTTPClient(insecureWebClient)) } - clt, err := roundtrip.NewClient(proxyAddr.String(), teleport.WebAPIVersion, opts...) + // We do not add the version prefix since web api endpoints will + // contain differing version prefixes. + clt, err := roundtrip.NewClient(proxyAddr.String(), "" /* version prefix */, opts...) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/client/https_client.go b/lib/client/https_client.go index 66293120843b2..f1b6a0cee8654 100644 --- a/lib/client/https_client.go +++ b/lib/client/https_client.go @@ -28,7 +28,6 @@ import ( "github.com/gravitational/trace" "golang.org/x/net/http/httpproxy" - "github.com/gravitational/teleport" tracehttp "github.com/gravitational/teleport/api/observability/tracing/http" apiutils "github.com/gravitational/teleport/api/utils" "github.com/gravitational/teleport/lib/httplib" @@ -62,7 +61,9 @@ func httpTransport(insecure bool, pool *x509.CertPool) *http.Transport { func NewWebClient(url string, opts ...roundtrip.ClientParam) (*WebClient, error) { opts = append(opts, roundtrip.SanitizerEnabled(true)) - clt, err := roundtrip.NewClient(url, teleport.WebAPIVersion, opts...) + // We do not add the version prefix since web api endpoints will contain + // differing version prefixes. + clt, err := roundtrip.NewClient(url, "" /* version prefix */, opts...) 
if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/client/weblogin_test.go b/lib/client/weblogin_test.go index cca05b892fe2b..1008308411d50 100644 --- a/lib/client/weblogin_test.go +++ b/lib/client/weblogin_test.go @@ -74,7 +74,7 @@ func TestHostCredentialsHttpFallback(t *testing.T) { // Start an http server (not https) so that the request only succeeds // if the fallback occurs. var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI != "/v1/webapi/host/credentials" { + if r.RequestURI != "/webapi/host/credentials" { w.WriteHeader(http.StatusNotFound) return } diff --git a/lib/httplib/httplib.go b/lib/httplib/httplib.go index f241f6d36ddb8..c345e3b2e854c 100644 --- a/lib/httplib/httplib.go +++ b/lib/httplib/httplib.go @@ -22,6 +22,7 @@ package httplib import ( "bufio" + "context" "encoding/json" "errors" "log/slog" @@ -33,6 +34,7 @@ import ( "strconv" "strings" + "github.com/coreos/go-semver/semver" "github.com/gravitational/roundtrip" "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" @@ -211,6 +213,51 @@ func ConvertResponse(re *roundtrip.Response, err error) (*roundtrip.Response, er return re, trace.ReadError(re.Code(), re.Bytes()) } +// ProxyVersion describes the parts of a Proxy semver +// version in the format: major.minor.patch-preRelease +type ProxyVersion struct { + // Major is the first part of version. + Major int64 `json:"major"` + // Minor is the second part of version. + Minor int64 `json:"minor"` + // Patch is the third part of version. + Patch int64 `json:"patch"` + // PreRelease is only defined if there was a hyphen + // and a word at the end of version eg: the prerelease + // value of version 18.0.0-dev is "dev". + PreRelease string `json:"preRelease"` + // String contains the whole version. + String string `json:"string"` +} + +// RouteNotFoundResponse writes a JSON error reply containing +// a not found error, a Version object, and a not found HTTP status code. 
+func RouteNotFoundResponse(ctx context.Context, w http.ResponseWriter, proxyVersion string) { + SetDefaultSecurityHeaders(w.Header()) + + errObj := &trace.TraceErr{ + Err: trace.NotFound("path not found"), + } + + ver, err := semver.NewVersion(proxyVersion) + if err != nil { + slog.DebugContext(ctx, "Error parsing Teleport proxy semver version", "err", err) + } else { + verObj := ProxyVersion{ + Major: ver.Major, + Minor: ver.Minor, + Patch: ver.Patch, + String: proxyVersion, + PreRelease: string(ver.PreRelease), + } + fields := make(map[string]interface{}) + fields["proxyVersion"] = verObj + errObj.Fields = fields + } + + roundtrip.ReplyJSON(w, http.StatusNotFound, errObj) +} + // ParseBool will parse boolean variable from url query // returns value, ok, error func ParseBool(q url.Values, name string) (bool, bool, error) { diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index d8d620ce73ac6..c81a422beca5d 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -446,8 +446,6 @@ func (h *APIHandler) Close() error { // NewHandler returns a new instance of web proxy handler func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { - const apiPrefix = "/" + teleport.WebAPIVersion - cfg.SetDefaults() h := &Handler{ @@ -612,13 +610,31 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { h.nodeWatcher = cfg.NodeWatcher } - routingHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // ensure security headers are set for all responses - httplib.SetDefaultSecurityHeaders(w.Header()) - - // request is going to the API? - if strings.HasPrefix(r.URL.Path, apiPrefix) { - http.StripPrefix(apiPrefix, h).ServeHTTP(w, r) + const v1Prefix = "/v1" + notFoundRoutingHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Request is going to the API? 
+ // If no routes were matched, it could be because it's a path with `v1` prefix + // (eg: the Teleport web app will call "most" endpoints with v1 prefixed). + // + // `v1` paths are not defined with `v1` prefix. If the path turns out to be prefixed + // with `v1`, it will be stripped and served again. Historically, that's how it started + // and should be kept that way to prevent breakage. + // + // v2+ prefixes will be expected by both caller and definition and will not be stripped. + if strings.HasPrefix(r.URL.Path, v1Prefix) { + pathParts := strings.Split(r.URL.Path, "/") + if len(pathParts) > 2 { + // check against known second part of path to ensure we + // aren't allowing paths like /v1/v2/webapi + // part[0] is empty space from leading slash "/" + // part[1] is the prefix "v1" + switch pathParts[2] { + case "webapi", "enterprise", "scripts", ".well-known", "workload-identity": + http.StripPrefix(v1Prefix, h).ServeHTTP(w, r) + return + } + } + httplib.RouteNotFoundResponse(r.Context(), w, teleport.Version) return } @@ -670,11 +686,12 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { h.logger.ErrorContext(r.Context(), "Failed to execute index page template", "error", err) } } else { - http.NotFound(w, r) + httplib.RouteNotFoundResponse(r.Context(), w, teleport.Version) + return } }) - h.NotFound = routingHandler + h.NotFound = notFoundRoutingHandler if cfg.PluginRegistry != nil { if err := cfg.PluginRegistry.RegisterProxyWebHandlers(h); err != nil { @@ -867,8 +884,12 @@ func (h *Handler) bindDefaultEndpoints() { h.POST("/webapi/tokens", h.WithAuth(h.upsertTokenHandle)) // used for updating a token h.PUT("/webapi/tokens", h.WithAuth(h.upsertTokenHandle)) - // used for creating tokens used during guided discover flows + // TODO(kimlisa): DELETE IN 19.0 - Replaced by /v2/webapi/token endpoint + // MUST delete with related code found in web/packages/teleport/src/services/joinToken/joinToken.ts(fetchJoinToken) h.POST("/webapi/token", 
h.WithAuth(h.createTokenForDiscoveryHandle)) + // used for creating tokens used during guided discover flows + // v2 endpoint processes "suggestedLabels" field + h.POST("/v2/webapi/token", h.WithAuth(h.createTokenForDiscoveryHandle)) h.GET("/webapi/tokens", h.WithAuth(h.getTokens)) h.DELETE("/webapi/tokens", h.WithAuth(h.deleteToken)) @@ -1000,7 +1021,11 @@ func (h *Handler) bindDefaultEndpoints() { h.GET("/webapi/scripts/integrations/configure/deployservice-iam.sh", h.WithLimiter(h.awsOIDCConfigureDeployServiceIAM)) h.POST("/webapi/sites/:site/integrations/aws-oidc/:name/ec2", h.WithClusterAuth(h.awsOIDCListEC2)) h.POST("/webapi/sites/:site/integrations/aws-oidc/:name/eksclusters", h.WithClusterAuth(h.awsOIDCListEKSClusters)) + // TODO(kimlisa): DELETE IN 19.0 - replaced by /v2/webapi/sites/:site/integrations/aws-oidc/:name/enrolleksclusters + // MUST delete with related code found in web/packages/teleport/src/services/integrations/integrations.ts(enrollEksClusters) h.POST("/webapi/sites/:site/integrations/aws-oidc/:name/enrolleksclusters", h.WithClusterAuth(h.awsOIDCEnrollEKSClusters)) + // v2 endpoint introduces "extraLabels" field. 
+ h.POST("/v2/webapi/sites/:site/integrations/aws-oidc/:name/enrolleksclusters", h.WithClusterAuth(h.awsOIDCEnrollEKSClusters)) h.POST("/webapi/sites/:site/integrations/aws-oidc/:name/ec2ice", h.WithClusterAuth(h.awsOIDCListEC2ICE)) h.POST("/webapi/sites/:site/integrations/aws-oidc/:name/deployec2ice", h.WithClusterAuth(h.awsOIDCDeployEC2ICE)) h.POST("/webapi/sites/:site/integrations/aws-oidc/:name/securitygroups", h.WithClusterAuth(h.awsOIDCListSecurityGroups)) diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index b220d593bab5b..71b51568c5610 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -49,6 +49,7 @@ import ( "testing" "time" + "github.com/coreos/go-semver/semver" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -464,7 +465,7 @@ func newWebSuiteWithConfig(t *testing.T, cfg webSuiteConfig) *WebSuite { // Expired sessions are purged immediately var sessionLingeringThreshold time.Duration - fs, err := newDebugFileSystem() + fs, err := NewDebugFileSystem(false) require.NoError(t, err) features := *modules.GetModules().Features().ToProto() // safe to dereference because ToProto creates a struct and return a pointer to it @@ -3433,6 +3434,115 @@ func TestTokenGeneration(t *testing.T) { } } +func TestEndpointNotFoundHandling(t *testing.T) { + t.Parallel() + const username = "test-user@example.com" + // Allow user to create tokens. 
+ roleTokenCRD, err := types.NewRole(services.RoleNameForUser(username), types.RoleSpecV6{ + Allow: types.RoleConditions{ + Rules: []types.Rule{ + types.NewRule(types.KindToken, + []string{types.VerbCreate}), + }, + }, + }) + require.NoError(t, err) + + env := newWebPack(t, 1) + proxy := env.proxies[0] + pack := proxy.authPack(t, username, []types.Role{roleTokenCRD}) + + tt := []struct { + name string + endpoint string + shouldErr bool + }{ + { + name: "valid endpoint without v1 prefix", + endpoint: "webapi/token", + }, + { + name: "valid endpoint with v1 prefix", + endpoint: "v1/webapi/token", + }, + { + name: "valid endpoint with v2 prefix", + endpoint: "v2/webapi/token", + }, + { + name: "invalid double version prefixes", + endpoint: "v1/v2/webapi/token", + shouldErr: true, + }, + { + name: "route not matched version prefix", + endpoint: "v9999999/webapi/token", + shouldErr: true, + }, + { + name: "non api route with prefix", + endpoint: "v1/something/else", + shouldErr: true, + }, + { + name: "invalid triple version prefixes", + endpoint: "v1/v1/v1/webapi/token", + shouldErr: true, + }, + { + name: "invalid just prefix", + endpoint: "v1", + shouldErr: true, + }, + { + name: "invalid prefix", + endpoint: "v1s/webapi/token", + shouldErr: true, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + re, err := pack.clt.PostJSON(context.Background(), fmt.Sprintf("%s/%s", proxy.web.URL, tc.endpoint), types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + JoinMethod: types.JoinMethodToken, + }) + + if tc.shouldErr { + require.True(t, trace.IsNotFound(err)) + + jsonResp := struct { + Error struct { + Message string + } + Fields struct { + ProxyVersion httplib.ProxyVersion + } + }{} + + require.NoError(t, json.Unmarshal(re.Bytes(), &jsonResp)) + require.Equal(t, "path not found", jsonResp.Error.Message) + require.Equal(t, teleport.Version, jsonResp.Fields.ProxyVersion.String) + + ver, err := semver.NewVersion(teleport.Version) + 
require.NoError(t, err) + require.Equal(t, ver.Major, jsonResp.Fields.ProxyVersion.Major) + require.Equal(t, ver.Minor, jsonResp.Fields.ProxyVersion.Minor) + require.Equal(t, ver.Patch, jsonResp.Fields.ProxyVersion.Patch) + require.Equal(t, string(ver.PreRelease), jsonResp.Fields.ProxyVersion.PreRelease) + + } else { + require.NoError(t, err) + + var responseToken nodeJoinToken + err = json.Unmarshal(re.Bytes(), &responseToken) + require.NoError(t, err) + require.Equal(t, types.JoinMethodToken, responseToken.Method) + } + }) + } +} + func TestInstallDatabaseScriptGeneration(t *testing.T) { const username = "test-user@example.com" @@ -5015,7 +5125,7 @@ func TestDeleteMFA(t *testing.T) { jar, err := cookiejar.New(nil) require.NoError(t, err) opts := []roundtrip.ClientParam{roundtrip.BearerAuth(pack.session.Token), roundtrip.CookieJar(jar), roundtrip.HTTPClient(client.NewInsecureWebClient())} - rclt, err := roundtrip.NewClient(proxy.webURL.String(), teleport.WebAPIVersion, opts...) + rclt, err := roundtrip.NewClient(proxy.webURL.String(), "", opts...) require.NoError(t, err) clt := client.WebClient{Client: rclt} jar.SetCookies(&proxy.webURL, pack.cookies) @@ -8319,7 +8429,7 @@ func createProxy(ctx context.Context, t *testing.T, proxyID string, node *regula require.NoError(t, err) t.Cleanup(func() { require.NoError(t, proxyServer.Close()) }) - fs, err := newDebugFileSystem() + fs, err := NewDebugFileSystem(false) require.NoError(t, err) authID := state.IdentityID{ diff --git a/lib/web/apiserver_test_utils.go b/lib/web/apiserver_test_utils.go index d7fe5cd0bb3d7..9e6fff840b514 100644 --- a/lib/web/apiserver_test_utils.go +++ b/lib/web/apiserver_test_utils.go @@ -29,10 +29,14 @@ import ( ) // NewDebugFileSystem returns the HTTP file system implementation -func newDebugFileSystem() (http.FileSystem, error) { +func NewDebugFileSystem(isEnterprise bool) (http.FileSystem, error) { // If the location of the UI changes on disk then this will need to be updated. 
assetsPath := "../../webassets/teleport" + if isEnterprise { + assetsPath = "../../../webassets/teleport" + } + // Ensure we have the built assets available before continuing. for _, af := range []string{"index.html", "/app"} { _, err := os.Stat(filepath.Join(assetsPath, af)) diff --git a/lib/web/integrations_awsoidc.go b/lib/web/integrations_awsoidc.go index 100bc59a1ce93..b2a1fcb17315f 100644 --- a/lib/web/integrations_awsoidc.go +++ b/lib/web/integrations_awsoidc.go @@ -743,6 +743,7 @@ func (h *Handler) awsOIDCConfigureEKSIAM(w http.ResponseWriter, r *http.Request, } // awsOIDCEnrollEKSClusters enroll EKS clusters by installing teleport-kube-agent Helm chart on them. +// v2 endpoint introduces "extraLabels" field. func (h *Handler) awsOIDCEnrollEKSClusters(w http.ResponseWriter, r *http.Request, p httprouter.Params, sctx *SessionContext, site reversetunnelclient.RemoteSite) (any, error) { ctx := r.Context() diff --git a/lib/web/join_tokens.go b/lib/web/join_tokens.go index 033040a8545e0..df9896f5e1532 100644 --- a/lib/web/join_tokens.go +++ b/lib/web/join_tokens.go @@ -254,6 +254,8 @@ func (h *Handler) upsertTokenHandle(w http.ResponseWriter, r *http.Request, para return uiToken, nil } +// createTokenForDiscoveryHandle creates tokens used during guided discover flows. +// V2 endpoint processes "suggestedLabels" field. func (h *Handler) createTokenForDiscoveryHandle(w http.ResponseWriter, r *http.Request, params httprouter.Params, ctx *SessionContext) (interface{}, error) { clt, err := ctx.GetClient() if err != nil { @@ -342,9 +344,10 @@ func (h *Handler) createTokenForDiscoveryHandle(w http.ResponseWriter, r *http.R // We create an ID and return it as part of the Token, so the UI can use this ID to query the Node that joined using this token // WebUI can then query the resources by this id and answer the question: // - Which Node joined the cluster from this token Y? 
- req.SuggestedLabels = types.Labels{ - types.InternalResourceIDLabel: apiutils.Strings{uuid.NewString()}, + if req.SuggestedLabels == nil { + req.SuggestedLabels = make(types.Labels) } + req.SuggestedLabels[types.InternalResourceIDLabel] = apiutils.Strings{uuid.NewString()} provisionToken, err := types.NewProvisionTokenFromSpec(tokenName, expires, req) if err != nil { diff --git a/lib/web/join_tokens_test.go b/lib/web/join_tokens_test.go index 08be47c4e448c..ba0b0be4ff9b1 100644 --- a/lib/web/join_tokens_test.go +++ b/lib/web/join_tokens_test.go @@ -43,6 +43,7 @@ import ( "github.com/gravitational/teleport/lib/fixtures" "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/services" + libui "github.com/gravitational/teleport/lib/ui" "github.com/gravitational/teleport/lib/web/ui" ) @@ -327,6 +328,71 @@ func TestDeleteToken(t *testing.T) { require.Empty(t, cmp.Diff(resp.Items, []ui.JoinToken{staticUIToken}, cmpopts.IgnoreFields(ui.JoinToken{}, "Content"))) } +func TestCreateTokenForDiscovery(t *testing.T) { + ctx := context.Background() + username := "test-user@example.com" + env := newWebPack(t, 1) + proxy := env.proxies[0] + pack := proxy.authPack(t, username, nil /* roles */) + + match := func(resp nodeJoinToken, userLabels types.Labels) { + if len(userLabels) > 0 { + require.Empty(t, cmp.Diff([]libui.Label{{Name: "env"}, {Name: "teleport.internal/resource-id"}}, resp.SuggestedLabels, cmpopts.SortSlices( + func(a, b libui.Label) bool { + return a.Name < b.Name + }, + ), cmpopts.IgnoreFields(libui.Label{}, "Value"))) + } else { + require.Empty(t, cmp.Diff([]libui.Label{{Name: "teleport.internal/resource-id"}}, resp.SuggestedLabels, cmpopts.IgnoreFields(libui.Label{}, "Value"))) + } + require.NotEmpty(t, resp.ID) + require.NotEmpty(t, resp.Expiry) + require.Equal(t, types.JoinMethodToken, resp.Method) + } + + tt := []struct { + name string + req types.ProvisionTokenSpecV2 + }{ + { + name: "with suggested labels", + req: 
types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + SuggestedLabels: types.Labels{"env": []string{"testing"}}, + }, + }, + { + name: "without suggested labels", + req: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + SuggestedLabels: nil, + }, + }, + } + + for _, tc := range tt { + t.Run(fmt.Sprintf("v1 %s", tc.name), func(t *testing.T) { + endpointV1 := pack.clt.Endpoint("v1", "webapi", "token") + re, err := pack.clt.PostJSON(ctx, endpointV1, tc.req) + require.NoError(t, err) + + resp := nodeJoinToken{} + require.NoError(t, json.Unmarshal(re.Bytes(), &resp)) + match(resp, tc.req.SuggestedLabels) + }) + + t.Run(fmt.Sprintf("v2 %s", tc.name), func(t *testing.T) { + endpointV2 := pack.clt.Endpoint("v2", "webapi", "token") + re, err := pack.clt.PostJSON(ctx, endpointV2, tc.req) + require.NoError(t, err) + + resp := nodeJoinToken{} + require.NoError(t, json.Unmarshal(re.Bytes(), &resp)) + match(resp, tc.req.SuggestedLabels) + }) + } +} + func TestGenerateAzureTokenName(t *testing.T) { t.Parallel() rule1 := types.ProvisionTokenSpecV2Azure_Rule{ diff --git a/web/packages/build/vite/config.ts b/web/packages/build/vite/config.ts index 0d15db5fe3dbc..a429b6365aebd 100644 --- a/web/packages/build/vite/config.ts +++ b/web/packages/build/vite/config.ts @@ -105,14 +105,14 @@ export function createViteConfig( config.server.proxy = { // The format of the regex needs to assume that the slashes are escaped, for example: // \/v1\/webapi\/sites\/:site\/connect - [`^\\/v1\\/webapi\\/sites\\/${siteName}\\/connect`]: { + [`^\\/v[0-9]+\\/webapi\\/sites\\/${siteName}\\/connect`]: { target: `wss://${target}`, changeOrigin: false, secure: false, ws: true, }, // /webapi/sites/:site/desktops/:desktopName/connect - [`^\\/v1\\/webapi\\/sites\\/${siteName}\\/desktops\\/${siteName}\\/connect`]: + [`^\\/v[0-9]+\\/webapi\\/sites\\/${siteName}\\/desktops\\/${siteName}\\/connect`]: { target: `wss://${target}`, changeOrigin: false, @@ -120,31 +120,31 @@ 
export function createViteConfig( ws: true, }, // /webapi/sites/:site/kube/exec - [`^\\/v1\\/webapi\\/sites\\/${siteName}\\/kube/exec`]: { + [`^\\/v[0-9]+\\/webapi\\/sites\\/${siteName}\\/kube/exec`]: { target: `wss://${target}`, changeOrigin: false, secure: false, ws: true, }, // /webapi/sites/:site/desktopplayback/:sid - '^\\/v1\\/webapi\\/sites\\/(.*?)\\/desktopplayback\\/(.*?)': { + '^\\/v[0-9]+\\/webapi\\/sites\\/(.*?)\\/desktopplayback\\/(.*?)': { target: `wss://${target}`, changeOrigin: false, secure: false, ws: true, }, - '^\\/v1\\/webapi\\/assistant\\/(.*?)': { + '^\\/v[0-9]+\\/webapi\\/assistant\\/(.*?)': { target: `https://${target}`, changeOrigin: false, secure: false, }, - [`^\\/v1\\/webapi\\/sites\\/${siteName}\\/assistant`]: { + [`^\\/v[0-9]+\\/webapi\\/sites\\/${siteName}\\/assistant`]: { target: `wss://${target}`, changeOrigin: false, secure: false, ws: true, }, - '^\\/v1\\/webapi\\/command\\/(.*?)/execute': { + '^\\/v[0-9]+\\/webapi\\/command\\/(.*?)/execute': { target: `wss://${target}`, changeOrigin: false, secure: false, @@ -155,7 +155,7 @@ export function createViteConfig( changeOrigin: true, secure: false, }, - '/v1': { + '^\\/v[0-9]+': { target: `https://${target}`, changeOrigin: true, secure: false, diff --git a/web/packages/teleport/src/Discover/Database/DeployService/ManualDeploy/ManualDeploy.story.tsx b/web/packages/teleport/src/Discover/Database/DeployService/ManualDeploy/ManualDeploy.story.tsx index 4b11b3df65403..6cbf302b8b66c 100644 --- a/web/packages/teleport/src/Discover/Database/DeployService/ManualDeploy/ManualDeploy.story.tsx +++ b/web/packages/teleport/src/Discover/Database/DeployService/ManualDeploy/ManualDeploy.story.tsx @@ -53,7 +53,9 @@ export const Init = () => { Init.parameters = { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => HttpResponse.json(rawJoinToken)), + http.post(cfg.api.discoveryJoinToken.createV2, () => + HttpResponse.json(rawJoinToken) + ), ], }, }; @@ -74,7 +76,11 @@ export const InitWithLabels 
= () => { }; InitWithLabels.parameters = { msw: { - handlers: [http.post(cfg.api.joinTokenPath, () => HttpResponse.json({}))], + handlers: [ + http.post(cfg.api.discoveryJoinToken.createV2, () => + HttpResponse.json({}) + ), + ], }, }; diff --git a/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/Dialogs.story.tsx b/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/Dialogs.story.tsx index 76d6d67a0af96..e6dbe0bcb7635 100644 --- a/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/Dialogs.story.tsx +++ b/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/Dialogs.story.tsx @@ -221,7 +221,7 @@ ManualHelmDialogStory.storyName = 'ManualHelmDialog'; ManualHelmDialogStory.parameters = { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => { + http.post(cfg.api.discoveryJoinToken.createV2, () => { return HttpResponse.json({ id: 'token-id', suggestedLabels: [ diff --git a/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/EnrollEksCluster.story.tsx b/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/EnrollEksCluster.story.tsx index 9feab905372e7..77e4ff0d5f263 100644 --- a/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/EnrollEksCluster.story.tsx +++ b/web/packages/teleport/src/Discover/Kubernetes/EnrollEKSCluster/EnrollEksCluster.story.tsx @@ -69,7 +69,7 @@ export default { ], }; -const tokenHandler = http.post(cfg.api.joinTokenPath, () => { +const tokenHandler = http.post(cfg.api.discoveryJoinToken.createV2, () => { return HttpResponse.json({ id: 'token-id', suggestedLabels: [ diff --git a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.story.tsx b/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.story.tsx index e112ed2c1ae89..911c013c81046 100644 --- a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.story.tsx +++ b/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.story.tsx @@ -60,7 +60,9 @@ export const 
Polling: StoryObj = { http.get(kubePathWithoutQuery, async () => { await delay('infinite'); }), - http.post(cfg.api.joinTokenPath, () => HttpResponse.json(rawJoinToken)), + http.post(cfg.api.discoveryJoinToken.createV2, () => + HttpResponse.json(rawJoinToken) + ), ], }, }, @@ -80,7 +82,9 @@ export const PollingSuccess: StoryObj = { http.get(kubePathWithoutQuery, () => { return HttpResponse.json({ items: [{}] }); }), - http.post(cfg.api.joinTokenPath, () => HttpResponse.json(rawJoinToken)), + http.post(cfg.api.discoveryJoinToken.createV2, () => + HttpResponse.json(rawJoinToken) + ), ], }, }, @@ -103,7 +107,9 @@ export const PollingError: StoryObj = { http.get(kubePathWithoutQuery, async () => { await delay('infinite'); }), - http.post(cfg.api.joinTokenPath, () => HttpResponse.json(rawJoinToken)), + http.post(cfg.api.discoveryJoinToken.createV2, () => + HttpResponse.json(rawJoinToken) + ), ], }, }, @@ -120,7 +126,7 @@ export const Processing: StoryObj = { parameters: { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, async () => { + http.post(cfg.api.discoveryJoinToken.createV2, async () => { await delay('infinite'); }), ], @@ -139,7 +145,7 @@ export const Failed: StoryObj = { parameters: { msw: { handlers: [ - http.post(cfg.getJoinTokenUrl(), () => + http.post(cfg.api.discoveryJoinToken.createV2, () => HttpResponse.json( { error: { message: 'Whoops, something went wrong.' 
}, diff --git a/web/packages/teleport/src/Discover/Server/DiscoveryConfigSsm/DiscoveryConfigSsm.story.tsx b/web/packages/teleport/src/Discover/Server/DiscoveryConfigSsm/DiscoveryConfigSsm.story.tsx index 4deb03187e722..08f5d55f73234 100644 --- a/web/packages/teleport/src/Discover/Server/DiscoveryConfigSsm/DiscoveryConfigSsm.story.tsx +++ b/web/packages/teleport/src/Discover/Server/DiscoveryConfigSsm/DiscoveryConfigSsm.story.tsx @@ -66,7 +66,7 @@ export const SuccessCloud = () => { SuccessCloud.parameters = { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => + http.post(cfg.api.discoveryJoinToken.createV2, () => HttpResponse.json({ id: 'token-id' }) ), http.post(cfg.api.discoveryConfigPath, () => @@ -90,7 +90,7 @@ export const SuccessSelfHosted = () => ( SuccessSelfHosted.parameters = { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => + http.post(cfg.api.discoveryJoinToken.createV2, () => HttpResponse.json({ id: 'token-id' }) ), http.post(cfg.api.discoveryConfigPath, () => @@ -107,7 +107,7 @@ export const Loading = () => { Loading.parameters = { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => + http.post(cfg.api.discoveryJoinToken.createV2, () => HttpResponse.json({ id: 'token-id' }) ), http.post(cfg.api.discoveryConfigPath, () => delay('infinite')), @@ -122,7 +122,7 @@ export const Failed = () => { Failed.parameters = { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => + http.post(cfg.api.discoveryJoinToken.createV2, () => HttpResponse.json({ id: 'token-id' }) ), http.post(cfg.api.discoveryConfigPath, () => diff --git a/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx b/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx index 02f2b8e488127..7dad3e0ec67de 100644 --- a/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx +++ b/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx @@ -63,7 +63,7 @@ export 
const Polling: StoryObj = { http.get(nodesPathWithoutQuery, () => { return delay('infinite'); }), - http.post(cfg.api.joinTokenPath, () => { + http.post(cfg.api.discoveryJoinToken.createV2, () => { return HttpResponse.json(joinToken); }), ], @@ -86,7 +86,7 @@ export const PollingSuccess: StoryObj = { http.get(nodesPathWithoutQuery, () => { return HttpResponse.json({ items: [{}] }); }), - http.post(cfg.api.joinTokenPath, () => { + http.post(cfg.api.discoveryJoinToken.createV2, () => { return HttpResponse.json(joinToken); }), ], @@ -111,7 +111,7 @@ export const PollingError: StoryObj = { http.get(nodesPathWithoutQuery, () => { return delay('infinite'); }), - http.post(cfg.api.joinTokenPath, () => { + http.post(cfg.api.discoveryJoinToken.createV2, () => { return HttpResponse.json(joinToken); }), ], @@ -130,7 +130,7 @@ export const Processing: StoryObj = { parameters: { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => { + http.post(cfg.api.discoveryJoinToken.createV2, () => { return delay('infinite'); }), ], @@ -149,7 +149,7 @@ export const Failed: StoryObj = { parameters: { msw: { handlers: [ - http.post(cfg.api.joinTokenPath, () => { + http.post(cfg.api.discoveryJoinToken.createV2, () => { return HttpResponse.json( { error: { message: 'Whoops, something went wrong.' }, diff --git a/web/packages/teleport/src/Integrations/Enroll/AwsOidc/AwsOidc.test.tsx b/web/packages/teleport/src/Integrations/Enroll/AwsOidc/AwsOidc.test.tsx index 882bf66d2a59b..142e680f8c8ec 100644 --- a/web/packages/teleport/src/Integrations/Enroll/AwsOidc/AwsOidc.test.tsx +++ b/web/packages/teleport/src/Integrations/Enroll/AwsOidc/AwsOidc.test.tsx @@ -125,7 +125,10 @@ test('generate command', async () => { // Test create is still called with 404 ping error. 
jest.clearAllMocks(); - let error = new ApiError('', { status: 404 } as Response); + let error = new ApiError({ + message: '', + response: { status: 404 } as Response, + }); spyPing = jest .spyOn(integrationService, 'pingAwsOidcIntegration') .mockRejectedValue(error); @@ -136,7 +139,7 @@ test('generate command', async () => { // Test create isn't called with non 404 error jest.clearAllMocks(); - error = new ApiError('', { status: 400 } as Response); + error = new ApiError({ message: '', response: { status: 400 } as Response }); spyPing = jest .spyOn(integrationService, 'pingAwsOidcIntegration') .mockRejectedValue(error); diff --git a/web/packages/teleport/src/components/Authenticated/Authenticated.test.tsx b/web/packages/teleport/src/components/Authenticated/Authenticated.test.tsx index 64e24fd97b312..00478177ab623 100644 --- a/web/packages/teleport/src/components/Authenticated/Authenticated.test.tsx +++ b/web/packages/teleport/src/components/Authenticated/Authenticated.test.tsx @@ -69,9 +69,12 @@ describe('session', () => { }); test('valid session and invalid cookie', async () => { - const mockForbiddenError = new ApiError('some error', { - status: 403, - } as Response); + const mockForbiddenError = new ApiError({ + message: 'some error', + response: { + status: 403, + } as Response, + }); jest .spyOn(session, 'validateCookieAndSession') diff --git a/web/packages/teleport/src/config.ts b/web/packages/teleport/src/config.ts index d0daeb59e6c88..201a8cbac0932 100644 --- a/web/packages/teleport/src/config.ts +++ b/web/packages/teleport/src/config.ts @@ -284,7 +284,11 @@ const cfg = { trustedClustersPath: '/v1/webapi/trustedcluster/:name?', connectMyComputerLoginsPath: '/v1/webapi/connectmycomputer/logins', - joinTokenPath: '/v1/webapi/token', + discoveryJoinToken: { + // TODO(kimlisa): DELETE IN 19.0 - replaced by /v2/webapi/token + create: '/v1/webapi/token', + createV2: '/v2/webapi/token', + }, joinTokenYamlPath: '/v1/webapi/tokens/yaml', joinTokensPath: 
'/v1/webapi/tokens', dbScriptPath: '/scripts/:token/install-database.sh', @@ -367,8 +371,14 @@ const cfg = { eksClustersListPath: '/v1/webapi/sites/:clusterId/integrations/aws-oidc/:name/eksclusters', - eksEnrollClustersPath: - '/v1/webapi/sites/:clusterId/integrations/aws-oidc/:name/enrolleksclusters', + + eks: { + // TODO(kimlisa): DELETE IN 19.0 - replaced by /v2/webapi/sites/:clusterId/integrations/aws-oidc/:name/enrolleksclusters + enroll: + '/v1/webapi/sites/:clusterId/integrations/aws-oidc/:name/enrolleksclusters', + enrollV2: + '/v2/webapi/sites/:clusterId/integrations/aws-oidc/:name/enrolleksclusters', + }, ec2InstancesListPath: '/v1/webapi/sites/:clusterId/integrations/aws-oidc/:name/ec2', @@ -574,10 +584,6 @@ const cfg = { return cfg.api.joinTokensPath; }, - getJoinTokenUrl() { - return cfg.api.joinTokenPath; - }, - getJoinTokenYamlUrl() { return cfg.api.joinTokenYamlPath; }, @@ -1083,7 +1089,16 @@ const cfg = { getEnrollEksClusterUrl(integrationName: string): string { const clusterId = cfg.proxyCluster; - return generatePath(cfg.api.eksEnrollClustersPath, { + return generatePath(cfg.api.eks.enroll, { + clusterId, + name: integrationName, + }); + }, + + getEnrollEksClusterUrlV2(integrationName: string): string { + const clusterId = cfg.proxyCluster; + + return generatePath(cfg.api.eks.enrollV2, { clusterId, name: integrationName, }); diff --git a/web/packages/teleport/src/services/agents/make.ts b/web/packages/teleport/src/services/agents/make.ts index 4bc8afe8186fc..8cb27dbed5191 100644 --- a/web/packages/teleport/src/services/agents/make.ts +++ b/web/packages/teleport/src/services/agents/make.ts @@ -53,7 +53,7 @@ function makeTraces(traces: any): ConnectionDiagnosticTrace[] { export function makeLabelMapOfStrArrs(labels: ResourceLabel[] = []) { const m: Record = {}; - labels.forEach(label => { + labels?.forEach(label => { if (!m[label.name]) { m[label.name] = []; } diff --git a/web/packages/teleport/src/services/api/api.ts 
b/web/packages/teleport/src/services/api/api.ts index 5b19aef0bf580..9c75858a05e58 100644 --- a/web/packages/teleport/src/services/api/api.ts +++ b/web/packages/teleport/src/services/api/api.ts @@ -23,7 +23,7 @@ import websession from 'teleport/services/websession'; import { MfaChallengeResponse } from '../mfa'; import { storageService } from '../storageService'; -import parseError, { ApiError } from './parseError'; +import parseError, { ApiError, parseProxyVersion } from './parseError'; export const MFA_HEADER = 'Teleport-Mfa-Response'; @@ -148,10 +148,11 @@ const api = { try { json = await response.json(); } catch (err) { + // error reading JSON const message = response.ok ? err.message : `${response.status} - ${response.url}`; - throw new ApiError(message, response, { cause: err }); + throw new ApiError({ message, response, opts: { cause: err } }); } if (response.ok) { @@ -176,7 +177,12 @@ const api = { ); const shouldRetry = isAdminActionMfaError && !mfaResponse; if (!shouldRetry) { - throw new ApiError(parseError(json), response, undefined, json.messages); + throw new ApiError({ + message: parseError(json), + response, + proxyVersion: parseProxyVersion(json), + messages: json.messages, + }); } let mfaResponseForRetry; diff --git a/web/packages/teleport/src/services/api/parseError.ts b/web/packages/teleport/src/services/api/parseError.ts index 3ef3e43190bbb..cdf326e00a222 100644 --- a/web/packages/teleport/src/services/api/parseError.ts +++ b/web/packages/teleport/src/services/api/parseError.ts @@ -16,6 +16,42 @@ * along with this program. If not, see . */ +/** + * The version of the proxy where the error occurred. + * + * Currently, the proxy version field is only returned + * with api routes "not found" error. + * + * Used to determine outdated proxies. + * + * This response was introduced in v17.2.0. 
+ */ +interface ProxyVersion { + major: number; + minor: number; + patch: number; + /** + * defined if version is not for production eg: + * the prerelease value for version 17.0.0-dev, is "dev" + */ + preRelease: string; + /** + * full version in string eg: "17.0.0-dev" + */ + string: string; +} + +interface ApiErrorConstructor { + /** + * message is the main error, usually the "cause" of the error. + */ + message: string; + response: Response; + proxyVersion?: ProxyVersion; + opts?: ErrorOptions; + messages?: string[]; +} + export default function parseError(json) { let msg = ''; @@ -29,6 +65,10 @@ export default function parseError(json) { return msg; } +export function parseProxyVersion(json): ProxyVersion | undefined { + return json?.fields?.proxyVersion; +} + export class ApiError extends Error { response: Response; /** @@ -41,17 +81,23 @@ export class ApiError extends Error { */ messages: string[]; - constructor( - message: string, - response: Response, - opts?: ErrorOptions, - messages?: string[] - ) { - // message is the main error, usually the "cause" of the error. + /** + * Only defined with api routes "not found" error. 
+ */ + proxyVersion?: ProxyVersion; + + constructor({ + message, + response, + proxyVersion, + opts, + messages, + }: ApiErrorConstructor) { message = message || 'Unknown error'; super(message, opts); this.response = response; this.name = 'ApiError'; this.messages = messages || []; + this.proxyVersion = proxyVersion; } } diff --git a/web/packages/teleport/src/services/integrations/integrations.test.ts b/web/packages/teleport/src/services/integrations/integrations.test.ts index 1d636f068221f..ccab940bbf8af 100644 --- a/web/packages/teleport/src/services/integrations/integrations.test.ts +++ b/web/packages/teleport/src/services/integrations/integrations.test.ts @@ -22,6 +22,10 @@ import api from 'teleport/services/api'; import { integrationService } from './integrations'; import { IntegrationAudience, IntegrationStatusCode } from './types'; +beforeEach(() => { + jest.resetAllMocks(); +}); + test('fetch a single integration: fetchIntegration()', async () => { // test a valid response jest.spyOn(api, 'get').mockResolvedValue(awsOidcIntegration); @@ -196,6 +200,50 @@ test('fetchAwsDatabases response', async () => { }); }); +test('enrollEksClusters without labels calls v1', async () => { + jest.spyOn(api, 'post').mockResolvedValue({}); + + await integrationService.enrollEksClusters('integration', { + region: 'us-east-1', + enableAppDiscovery: false, + clusterNames: ['cluster'], + }); + + expect(api.post).toHaveBeenCalledWith( + cfg.getEnrollEksClusterUrl('integration'), + { + clusterNames: ['cluster'], + enableAppDiscovery: false, + region: 'us-east-1', + }, + null, + undefined + ); +}); + +test('enrollEksClusters with labels calls v2', async () => { + jest.spyOn(api, 'post').mockResolvedValue({}); + + await integrationService.enrollEksClusters('integration', { + region: 'us-east-1', + enableAppDiscovery: false, + clusterNames: ['cluster'], + extraLabels: [{ name: 'env', value: 'staging' }], + }); + + expect(api.post).toHaveBeenCalledWith( + 
cfg.getEnrollEksClusterUrlV2('integration'), + { + clusterNames: ['cluster'], + enableAppDiscovery: false, + region: 'us-east-1', + extraLabels: [{ name: 'env', value: 'staging' }], + }, + null, + undefined + ); +}); + describe('fetchAwsDatabases() request body formatting', () => { test.each` protocol | expectedEngines | expectedRdsType diff --git a/web/packages/teleport/src/services/integrations/integrations.ts b/web/packages/teleport/src/services/integrations/integrations.ts index 7b1ffa0b1724d..e2eef4a21c58d 100644 --- a/web/packages/teleport/src/services/integrations/integrations.ts +++ b/web/packages/teleport/src/services/integrations/integrations.ts @@ -23,6 +23,7 @@ import { App } from '../apps'; import makeApp from '../apps/makeApps'; import auth, { MfaChallengeScope } from '../auth/auth'; import makeNode from '../nodes/makeNode'; +import { withUnsupportedLabelFeatureErrorConversion } from '../version/unsupported'; import { AwsDatabaseVpcsResponse, AwsOidcDeployDatabaseServicesRequest, @@ -319,11 +320,26 @@ export const integrationService = { ): Promise { const mfaResponse = await auth.getMfaChallengeResponseForAdminAction(true); - return api.post( - cfg.getEnrollEksClusterUrl(integrationName), - req, - null, - mfaResponse + // TODO(kimlisa): DELETE IN 19.0 - replaced by v2 endpoint. 
+ if (!req.extraLabels?.length) { + return api.post( + cfg.getEnrollEksClusterUrl(integrationName), + req, + null, + mfaResponse + ); + } + + return ( + api + .post( + cfg.getEnrollEksClusterUrlV2(integrationName), + req, + null, + mfaResponse + ) + // TODO(kimlisa): DELETE IN 19.0 + .catch(withUnsupportedLabelFeatureErrorConversion) ); }, diff --git a/web/packages/teleport/src/services/integrations/types.ts b/web/packages/teleport/src/services/integrations/types.ts index ff8f4347b985c..e409136ae2e9e 100644 --- a/web/packages/teleport/src/services/integrations/types.ts +++ b/web/packages/teleport/src/services/integrations/types.ts @@ -18,6 +18,7 @@ import { Label } from 'teleport/types'; +import { ResourceLabel } from '../agents'; import { Node } from '../nodes'; /** @@ -539,6 +540,11 @@ export type EnrollEksClustersRequest = { region: string; enableAppDiscovery: boolean; clusterNames: string[]; + /** + * User provided labels. + * Only supported with V2 endpoint + */ + extraLabels?: ResourceLabel[]; }; export type EnrollEksClustersResponse = { diff --git a/web/packages/teleport/src/services/joinToken/joinToken.test.ts b/web/packages/teleport/src/services/joinToken/joinToken.test.ts index 1f941345c1006..6a45afe0824f0 100644 --- a/web/packages/teleport/src/services/joinToken/joinToken.test.ts +++ b/web/packages/teleport/src/services/joinToken/joinToken.test.ts @@ -22,6 +22,10 @@ import api from 'teleport/services/api'; import JoinTokenService from './joinToken'; import type { JoinTokenRequest } from './types'; +beforeEach(() => { + jest.resetAllMocks(); +}); + test('fetchJoinToken with an empty request properly sets defaults', () => { const svc = new JoinTokenService(); jest.spyOn(api, 'post').mockResolvedValue(null); @@ -29,7 +33,7 @@ test('fetchJoinToken with an empty request properly sets defaults', () => { // Test with all empty fields. 
svc.fetchJoinToken({} as any); expect(api.post).toHaveBeenCalledWith( - cfg.getJoinTokenUrl(), + cfg.api.discoveryJoinToken.create, { roles: undefined, join_method: 'token', @@ -52,7 +56,7 @@ test('fetchJoinToken request fields are set as requested', () => { }; svc.fetchJoinToken(mock); expect(api.post).toHaveBeenCalledWith( - cfg.getJoinTokenUrl(), + cfg.api.discoveryJoinToken.create, { roles: ['Node'], join_method: 'iam', @@ -62,3 +66,23 @@ test('fetchJoinToken request fields are set as requested', () => { null ); }); + +test('fetchJoinToken with labels calls v2 endpoint', () => { + const svc = new JoinTokenService(); + jest.spyOn(api, 'post').mockResolvedValue(null); + + const mock: JoinTokenRequest = { + suggestedLabels: [{ name: 'env', value: 'testing' }], + }; + svc.fetchJoinToken(mock); + expect(api.post).toHaveBeenCalledWith( + cfg.api.discoveryJoinToken.createV2, + { + suggested_labels: { env: ['testing'] }, + suggested_agent_matcher_labels: {}, + join_method: 'token', + allow: [], + }, + null + ); +}); diff --git a/web/packages/teleport/src/services/joinToken/joinToken.ts b/web/packages/teleport/src/services/joinToken/joinToken.ts index fe564b4440dae..66d6f0b20894f 100644 --- a/web/packages/teleport/src/services/joinToken/joinToken.ts +++ b/web/packages/teleport/src/services/joinToken/joinToken.ts @@ -20,6 +20,7 @@ import cfg from 'teleport/config'; import api from 'teleport/services/api'; import { makeLabelMapOfStrArrs } from '../agents/make'; +import { withUnsupportedLabelFeatureErrorConversion } from '../version/unsupported'; import makeJoinToken from './makeJoinToken'; import { JoinRule, JoinToken, JoinTokenRequest } from './types'; @@ -31,20 +32,43 @@ class JoinTokenService { req: JoinTokenRequest, signal: AbortSignal = null ): Promise { - return api - .post( - cfg.getJoinTokenUrl(), - { - roles: req.roles, - join_method: req.method || 'token', - allow: makeAllowField(req.rules || []), - suggested_agent_matcher_labels: makeLabelMapOfStrArrs( - 
req.suggestedAgentMatcherLabels - ), - }, - signal - ) - .then(makeJoinToken); + // TODO(kimlisa): DELETE IN 19.0 - replaced by v2 endpoint. + if (!req.suggestedLabels?.length) { + return api + .post( + cfg.api.discoveryJoinToken.create, + { + roles: req.roles, + join_method: req.method || 'token', + allow: makeAllowField(req.rules || []), + suggested_agent_matcher_labels: makeLabelMapOfStrArrs( + req.suggestedAgentMatcherLabels + ), + }, + signal + ) + .then(makeJoinToken); + } + + return ( + api + .post( + cfg.api.discoveryJoinToken.createV2, + { + roles: req.roles, + join_method: req.method || 'token', + allow: makeAllowField(req.rules || []), + suggested_agent_matcher_labels: makeLabelMapOfStrArrs( + req.suggestedAgentMatcherLabels + ), + suggested_labels: makeLabelMapOfStrArrs(req.suggestedLabels), + }, + signal + ) + .then(makeJoinToken) + // TODO(kimlisa): DELETE IN 19.0 + .catch(withUnsupportedLabelFeatureErrorConversion) + ); } upsertJoinTokenYAML( diff --git a/web/packages/teleport/src/services/joinToken/types.ts b/web/packages/teleport/src/services/joinToken/types.ts index a36c1a975d1bd..3daa8f3322a70 100644 --- a/web/packages/teleport/src/services/joinToken/types.ts +++ b/web/packages/teleport/src/services/joinToken/types.ts @@ -138,4 +138,13 @@ export type JoinTokenRequest = { method?: JoinMethod; // content is the yaml content of the joinToken to be created content?: string; + /** + * User provided labels. + * SuggestedLabels is a set of labels that resources should set when using this token to enroll + * themselves in the cluster. + * Currently, only node-join scripts create a configuration according to the suggestion. + * + * Only supported with V2 endpoint. 
+ */ + suggestedLabels?: ResourceLabel[]; }; diff --git a/web/packages/teleport/src/services/version/unsupported.ts b/web/packages/teleport/src/services/version/unsupported.ts new file mode 100644 index 0000000000000..df21c804c8df4 --- /dev/null +++ b/web/packages/teleport/src/services/version/unsupported.ts @@ -0,0 +1,33 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +import { ApiError } from '../api/parseError'; + +export function withUnsupportedLabelFeatureErrorConversion( + err: unknown +): never { + if (err instanceof ApiError && err.response.status === 404) { + throw new Error( + 'We could not complete your request. ' + + 'Your proxy may be behind the minimum required version ' + + `(v17.2.0) to support adding resource labels. ` + + 'Ensure all proxies are upgraded or remove labels and try again.' 
+ ); + } + throw err; +} From 792eaa780abb8874ad95d81786a9446cbe71e42d Mon Sep 17 00:00:00 2001 From: Bernard Kim Date: Thu, 9 Jan 2025 12:16:33 -0800 Subject: [PATCH 32/45] Fix Azure join method throttling (#50251) * Validate Azure join using JWT claims * Add note about User-Agent --- lib/auth/bot_test.go | 2 +- lib/auth/join_azure.go | 114 ++++++++--- lib/auth/join_azure_test.go | 379 +++++++++++++++++++++++++++++++----- 3 files changed, 422 insertions(+), 73 deletions(-) diff --git a/lib/auth/bot_test.go b/lib/auth/bot_test.go index 2e019ffa7123e..85e192222c815 100644 --- a/lib/auth/bot_test.go +++ b/lib/auth/bot_test.go @@ -711,7 +711,7 @@ func TestRegisterBot_RemoteAddr(t *testing.T) { rsID := vmResourceID(subID, resourceGroup, "test-vm") vmID := "vmID" - accessToken, err := makeToken(rsID, a.clock.Now()) + accessToken, err := makeToken(rsID, "", a.clock.Now()) require.NoError(t, err) // add token to auth server diff --git a/lib/auth/join_azure.go b/lib/auth/join_azure.go index df5a1632e05e0..fcfd43f90026b 100644 --- a/lib/auth/join_azure.go +++ b/lib/auth/join_azure.go @@ -19,10 +19,12 @@ package auth import ( + "cmp" "context" "crypto/x509" "encoding/base64" "encoding/pem" + "log/slog" "net/url" "slices" "strings" @@ -30,6 +32,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/coreos/go-oidc" "github.com/digitorus/pkcs7" "github.com/go-jose/go-jose/v3/jwt" @@ -44,7 +48,14 @@ import ( "github.com/gravitational/teleport/lib/utils" ) -const azureAccessTokenAudience = "https://management.azure.com/" +const ( + azureAccessTokenAudience = "https://management.azure.com/" + + // azureUserAgent specifies the Azure User-Agent identification for telemetry. + azureUserAgent = "teleport" + // azureVirtualMachine specifies the Azure virtual machine resource type. 
+ azureVirtualMachine = "virtualMachines" +) // Structs for unmarshaling attested data. Schema can be found at // https://learn.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service?tabs=linux#response-2 @@ -77,9 +88,23 @@ type attestedData struct { type accessTokenClaims struct { jwt.Claims - ResourceID string `json:"xms_mirid"` - TenantID string `json:"tid"` - Version string `json:"ver"` + TenantID string `json:"tid"` + Version string `json:"ver"` + + // Azure JWT tokens include two optional claims that can be used to validate + // the subscription and resource group of a joining node. These claims hold + // different values depending on the assigned Managed Identity of the Azure VM: + // - xms_mirid: + // - For System-Assigned Identity it represents the resource id of the VM. + // - For User-Assigned Identity it represents the resource id of the user-assigned identity. + // - xms_az_rid: + // - For System-Assigned Identity this claim is omitted. + // - For User-Assigned Identity it represents the resource id of the VM. + // + // More details at: https://learn.microsoft.com/en-us/answers/questions/1282788/existence-of-xms-az-rid-field-in-activity-logs-of + + ManangedIdentityResourceID string `json:"xms_mirid"` + AzureResourceID string `json:"xms_az_rid"` } type azureVerifyTokenFunc func(ctx context.Context, rawIDToken string) (*accessTokenClaims, error) @@ -145,7 +170,16 @@ func (cfg *azureRegisterConfig) CheckAndSetDefaults(ctx context.Context) error { } if cfg.getVMClient == nil { cfg.getVMClient = func(subscriptionID string, token *azure.StaticCredential) (azure.VirtualMachinesClient, error) { - client, err := azure.NewVirtualMachinesClient(subscriptionID, token, nil) + // The User-Agent is added for debugging purposes. It helps identify + // and isolate teleport traffic. 
+ opts := &armpolicy.ClientOptions{ + ClientOptions: policy.ClientOptions{ + Telemetry: policy.TelemetryOptions{ + ApplicationID: azureUserAgent, + }, + }, + } + client, err := azure.NewVirtualMachinesClient(subscriptionID, token, opts) return client, trace.Wrap(err) } } @@ -211,8 +245,16 @@ func parseAndVerifyAttestedData(ctx context.Context, adBytes []byte, challenge s } // verifyVMIdentity verifies that the provided access token came from the -// correct Azure VM. -func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken, subscriptionID, vmID string, requestStart time.Time) (*azure.VirtualMachine, error) { +// correct Azure VM. Returns the Aure join attributes +func verifyVMIdentity( + ctx context.Context, + cfg *azureRegisterConfig, + accessToken, + subscriptionID, + vmID string, + requestStart time.Time, + logger *slog.Logger, +) (joinAttrs *workloadidentityv1pb.JoinAttrsAzure, err error) { tokenClaims, err := cfg.verify(ctx, accessToken) if err != nil { return nil, trace.Wrap(err) @@ -240,6 +282,20 @@ func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken return nil, trace.Wrap(err) } + // Listing all VMs in an Azure subscription during the verification process + // is problematic when there are a large number of VMs in an Azure subscription. + // In some cases this can lead to throttling due to Azure API rate limits. + // To address the issue, the verification process will first attempt to + // parse required VM identifiers from the token claims. If this method fails, + // fallback to the original method of listing VMs and parsing the VM identifiers + // from the VM resource. + vmSubscription, vmResourceGroup, err := claimsToIdentifiers(tokenClaims) + if err == nil { + return azureJoinToAttrs(vmSubscription, vmResourceGroup), nil + } + logger.WarnContext(ctx, "Failed to parse VM identifiers from claims. 
Retrying with Azure VM API.", + "error", err) + tokenCredential := azure.NewStaticCredential(azcore.AccessToken{ Token: accessToken, ExpiresOn: tokenClaims.Expiry.Time(), @@ -249,7 +305,7 @@ func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken return nil, trace.Wrap(err) } - resourceID, err := arm.ParseResourceID(tokenClaims.ResourceID) + resourceID, err := arm.ParseResourceID(tokenClaims.ManangedIdentityResourceID) if err != nil { return nil, trace.Wrap(err) } @@ -258,8 +314,8 @@ func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken // If the token is from the system-assigned managed identity, the resource ID // is for the VM itself and we can use it to look up the VM. - if slices.Contains(resourceID.ResourceType.Types, "virtualMachines") { - vm, err = vmClient.Get(ctx, tokenClaims.ResourceID) + if slices.Contains(resourceID.ResourceType.Types, azureVirtualMachine) { + vm, err = vmClient.Get(ctx, tokenClaims.ManangedIdentityResourceID) if err != nil { return nil, trace.Wrap(err) } @@ -278,21 +334,35 @@ func verifyVMIdentity(ctx context.Context, cfg *azureRegisterConfig, accessToken return nil, trace.Wrap(err) } } + return azureJoinToAttrs(vm.Subscription, vm.ResourceGroup), nil +} - return vm, nil +// claimsToIdentifiers returns the vm identifiers from the provided claims. +func claimsToIdentifiers(tokenClaims *accessTokenClaims) (subscriptionID, resourceGroupID string, err error) { + // xms_az_rid claim is omitted when the VM is assigned a System-Assigned Identity. + // The xms_mirid claim should be used instead. 
+ rid := cmp.Or(tokenClaims.AzureResourceID, tokenClaims.ManangedIdentityResourceID) + resourceID, err := arm.ParseResourceID(rid) + if err != nil { + return "", "", trace.Wrap(err, "failed to parse resource id from claims") + } + if !slices.Contains(resourceID.ResourceType.Types, azureVirtualMachine) { + return "", "", trace.BadParameter("unexpected resource type: %q", resourceID.ResourceType.Type) + } + return resourceID.SubscriptionID, resourceID.ResourceGroupName, nil } -func checkAzureAllowRules(vm *azure.VirtualMachine, token string, allowRules []*types.ProvisionTokenSpecV2Azure_Rule) error { - for _, rule := range allowRules { - if rule.Subscription != vm.Subscription { +func checkAzureAllowRules(vmID string, attrs *workloadidentityv1pb.JoinAttrsAzure, token *types.ProvisionTokenV2) error { + for _, rule := range token.Spec.Azure.Allow { + if rule.Subscription != attrs.Subscription { continue } - if !azureResourceGroupIsAllowed(rule.ResourceGroups, vm.ResourceGroup) { + if !azureResourceGroupIsAllowed(rule.ResourceGroups, attrs.ResourceGroup) { continue } return nil } - return trace.AccessDenied("instance %v did not match any allow rules in token %v", vm.Name, token) + return trace.AccessDenied("instance %v did not match any allow rules in token %v", vmID, token.GetName()) } func azureResourceGroupIsAllowed(allowedResourceGroups []string, vmResourceGroup string) bool { if len(allowedResourceGroups) == 0 { @@ -313,10 +383,10 @@ func azureResourceGroupIsAllowed(allowedResourceGroups []string, vmResourceGroup return false } -func azureJoinToAttrs(vm *azure.VirtualMachine) *workloadidentityv1pb.JoinAttrsAzure { +func azureJoinToAttrs(subscriptionID, resourceGroupID string) *workloadidentityv1pb.JoinAttrsAzure { return &workloadidentityv1pb.JoinAttrsAzure{ - Subscription: vm.Subscription, - ResourceGroup: vm.ResourceGroup, + Subscription: subscriptionID, + ResourceGroup: resourceGroupID, } } @@ -345,13 +415,11 @@ func (a *Server) checkAzureRequest( return nil, 
trace.Wrap(err) } - vm, err := verifyVMIdentity(ctx, cfg, req.AccessToken, subID, vmID, requestStart) + attrs, err := verifyVMIdentity(ctx, cfg, req.AccessToken, subID, vmID, requestStart, a.logger) if err != nil { return nil, trace.Wrap(err) } - attrs := azureJoinToAttrs(vm) - - if err := checkAzureAllowRules(vm, token.GetName(), token.Spec.Azure.Allow); err != nil { + if err := checkAzureAllowRules(vmID, attrs, token); err != nil { return attrs, trace.Wrap(err) } diff --git a/lib/auth/join_azure_test.go b/lib/auth/join_azure_test.go index 0944e1ac9ed48..c7cc7c5b18954 100644 --- a/lib/auth/join_azure_test.go +++ b/lib/auth/join_azure_test.go @@ -104,12 +104,16 @@ func withChallengeAzure(challenge string) azureChallengeResponseOption { } func vmResourceID(subscription, resourceGroup, name string) string { - return resourceID("virtualMachines", subscription, resourceGroup, name) + return resourceID("Microsoft.Compute/virtualMachines", subscription, resourceGroup, name) +} + +func identityResourceID(subscription, resourceGroup, name string) string { + return resourceID("Microsoft.ManagedIdentity/userAssignedIdentities", subscription, resourceGroup, name) } func resourceID(resourceType, subscription, resourceGroup, name string) string { return fmt.Sprintf( - "/subscriptions/%v/resourcegroups/%v/providers/Microsoft.Compute/%v/%v", + "/subscriptions/%v/resourcegroups/%v/providers/%v/%v", subscription, resourceGroup, resourceType, name, ) } @@ -131,7 +135,7 @@ func mockVerifyToken(err error) azureVerifyTokenFunc { } } -func makeToken(resourceID string, issueTime time.Time) (string, error) { +func makeToken(managedIdentityResourceID, azureResourceID string, issueTime time.Time) (string, error) { sig, err := jose.NewSigner(jose.SigningKey{ Algorithm: jose.HS256, Key: []byte("test-key"), @@ -149,9 +153,10 @@ func makeToken(resourceID string, issueTime time.Time) (string, error) { Expiry: jwt.NewNumericDate(issueTime.Add(time.Minute)), ID: "id", }, - ResourceID: resourceID, 
- TenantID: "test-tenant-id", - Version: "1.0", + ManangedIdentityResourceID: managedIdentityResourceID, + AzureResourceID: azureResourceID, + TenantID: "test-tenant-id", + Version: "1.0", } raw, err := jwt.Signed(sig).Claims(claims).CompactSerialize() if err != nil { @@ -189,28 +194,28 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) { isBadParameter := func(t require.TestingT, err error, _ ...any) { require.True(t, trace.IsBadParameter(err), "expected Bad Parameter error, actual error: %v", err) } - isNotFound := func(t require.TestingT, err error, _ ...any) { - require.True(t, trace.IsNotFound(err), "expected Not Found error, actual error: %v", err) - } defaultSubscription := uuid.NewString() defaultResourceGroup := "my-resource-group" - defaultName := "test-vm" + defaultVMName := "test-vm" + defaultIdentityName := "test-id" defaultVMID := "my-vm-id" - defaultResourceID := vmResourceID(defaultSubscription, defaultResourceGroup, defaultName) + defaultVMResourceID := vmResourceID(defaultSubscription, defaultResourceGroup, defaultVMName) + defaultIdentityResourceID := identityResourceID(defaultSubscription, defaultResourceGroup, defaultIdentityName) tests := []struct { - name string - tokenResourceID string - tokenSubscription string - tokenVMID string - requestTokenName string - tokenSpec types.ProvisionTokenSpecV2 - challengeResponseOptions []azureChallengeResponseOption - challengeResponseErr error - certs []*x509.Certificate - verify azureVerifyTokenFunc - assertError require.ErrorAssertionFunc + name string + tokenManagedIdentityResourceID string + tokenAzureResourceID string + tokenSubscription string + tokenVMID string + requestTokenName string + tokenSpec types.ProvisionTokenSpecV2 + challengeResponseOptions []azureChallengeResponseOption + challengeResponseErr error + certs []*x509.Certificate + verify azureVerifyTokenFunc + assertError require.ErrorAssertionFunc }{ { name: "basic passing case", @@ -380,10 +385,11 @@ func 
TestAuth_RegisterUsingAzureMethod(t *testing.T) { assertError: require.Error, }, { - name: "attested data and access token from different VMs", - requestTokenName: "test-token", - tokenSubscription: defaultSubscription, - tokenVMID: "some-other-vm-id", + name: "attested data and access token from different VMs", + requestTokenName: "test-token", + tokenSubscription: defaultSubscription, + tokenVMID: "some-other-vm-id", + tokenManagedIdentityResourceID: defaultIdentityResourceID, tokenSpec: types.ProvisionTokenSpecV2{ Roles: []types.SystemRole{types.RoleNode}, Azure: &types.ProvisionTokenSpecV2Azure{ @@ -400,11 +406,11 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) { assertError: isAccessDenied, }, { - name: "vm not found", - requestTokenName: "test-token", - tokenSubscription: defaultSubscription, - tokenVMID: defaultVMID, - tokenResourceID: vmResourceID(defaultSubscription, "nonexistent-group", defaultName), + name: "vm not found", + requestTokenName: "test-token", + tokenSubscription: defaultSubscription, + tokenVMID: "invalid-id", + tokenManagedIdentityResourceID: identityResourceID(defaultSubscription, defaultResourceGroup, "invalid-vm"), tokenSpec: types.ProvisionTokenSpecV2{ Roles: []types.SystemRole{types.RoleNode}, Azure: &types.ProvisionTokenSpecV2Azure{ @@ -418,14 +424,14 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) { }, verify: mockVerifyToken(nil), certs: []*x509.Certificate{tlsConfig.Certificate}, - assertError: isNotFound, + assertError: isAccessDenied, }, { - name: "lookup vm by id", - requestTokenName: "test-token", - tokenSubscription: defaultSubscription, - tokenVMID: defaultVMID, - tokenResourceID: resourceID("some.other.provider", defaultSubscription, defaultResourceGroup, defaultName), + name: "lookup vm by id", + requestTokenName: "test-token", + tokenSubscription: defaultSubscription, + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: defaultIdentityResourceID, tokenSpec: types.ProvisionTokenSpecV2{ Roles: 
[]types.SystemRole{types.RoleNode}, Azure: &types.ProvisionTokenSpecV2Azure{ @@ -442,11 +448,11 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) { assertError: require.NoError, }, { - name: "vm is in a different subscription than the token it provides", - requestTokenName: "test-token", - tokenSubscription: defaultSubscription, - tokenVMID: defaultVMID, - tokenResourceID: resourceID("some.other.provider", "some-other-subscription", defaultResourceGroup, defaultName), + name: "vm is in a different subscription than the token it provides", + requestTokenName: "test-token", + tokenSubscription: defaultSubscription, + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: identityResourceID("some-other-subscription", defaultResourceGroup, defaultVMName), tokenSpec: types.ProvisionTokenSpecV2{ Roles: []types.SystemRole{types.RoleNode}, Azure: &types.ProvisionTokenSpecV2Azure{ @@ -476,19 +482,19 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) { require.NoError(t, a.DeleteToken(ctx, token.GetName())) }) - rsID := tc.tokenResourceID - if rsID == "" { - rsID = vmResourceID(defaultSubscription, defaultResourceGroup, defaultName) + mirID := tc.tokenManagedIdentityResourceID + if mirID == "" { + mirID = vmResourceID(defaultSubscription, defaultResourceGroup, defaultVMName) } - accessToken, err := makeToken(rsID, a.clock.Now()) + accessToken, err := makeToken(mirID, "", a.clock.Now()) require.NoError(t, err) vmClient := &mockAzureVMClient{ vms: map[string]*azure.VirtualMachine{ - defaultResourceID: { - ID: defaultResourceID, - Name: defaultName, + defaultVMResourceID: { + ID: defaultVMResourceID, + Name: defaultVMName, Subscription: defaultSubscription, ResourceGroup: defaultResourceGroup, VMID: defaultVMID, @@ -541,3 +547,278 @@ func TestAuth_RegisterUsingAzureMethod(t *testing.T) { }) } } + +// TestAuth_RegisterUsingAzureClaims tests the Azure join method by verifying +// joining VMs by the token claims rather than from the Azure VM API. 
+func TestAuth_RegisterUsingAzureClaims(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + p, err := newTestPack(ctx, t.TempDir()) + require.NoError(t, err) + a := p.a + + sshPrivateKey, sshPublicKey, err := testauthority.New().GenerateKeyPair() + require.NoError(t, err) + + tlsConfig, err := fixtures.LocalTLSConfig() + require.NoError(t, err) + + block, _ := pem.Decode(fixtures.LocalhostKey) + pkey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + require.NoError(t, err) + + tlsPublicKey, err := PrivateKeyToPublicKeyTLS(sshPrivateKey) + require.NoError(t, err) + + isAccessDenied := func(t require.TestingT, err error, _ ...any) { + require.True(t, trace.IsAccessDenied(err), "expected Access Denied error, actual error: %v", err) + } + defaultSubscription := uuid.NewString() + defaultResourceGroup := "my-resource-group" + defaultVMName := "test-vm" + defaultIdentityName := "test-id" + defaultVMID := "my-vm-id" + + tests := []struct { + name string + tokenManagedIdentityResourceID string + tokenAzureResourceID string + tokenSubscription string + tokenVMID string + requestTokenName string + tokenSpec types.ProvisionTokenSpecV2 + challengeResponseOptions []azureChallengeResponseOption + challengeResponseErr error + certs []*x509.Certificate + verify azureVerifyTokenFunc + assertError require.ErrorAssertionFunc + }{ + { + name: "system-managed identity ok", + requestTokenName: "test-token", + tokenSubscription: "system-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: vmResourceID("system-managed-test", "system-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: "system-managed-test", + ResourceGroups: []string{"system-managed-test"}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: 
mockVerifyToken(nil), + certs: []*x509.Certificate{tlsConfig.Certificate}, + assertError: require.NoError, + }, + { + name: "system-managed identity with wrong subscription", + requestTokenName: "test-token", + tokenSubscription: "system-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: vmResourceID("system-managed-test", "system-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: defaultSubscription, + ResourceGroups: []string{"system-managed-test"}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: mockVerifyToken(nil), + certs: []*x509.Certificate{tlsConfig.Certificate}, + assertError: isAccessDenied, + }, + { + name: "system-managed identity with wrong resource group", + requestTokenName: "test-token", + tokenSubscription: "system-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: vmResourceID("system-managed-test", "system-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: "system-managed-test", + ResourceGroups: []string{defaultResourceGroup}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: mockVerifyToken(nil), + certs: []*x509.Certificate{tlsConfig.Certificate}, + assertError: isAccessDenied, + }, + { + name: "user-managed identity ok", + requestTokenName: "test-token", + tokenSubscription: "user-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: identityResourceID("user-managed-test", "user-managed-test", defaultIdentityName), + tokenAzureResourceID: vmResourceID("user-managed-test", "user-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: 
[]types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: "user-managed-test", + ResourceGroups: []string{"user-managed-test"}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: mockVerifyToken(nil), + certs: []*x509.Certificate{tlsConfig.Certificate}, + assertError: require.NoError, + }, + { + name: "user-managed identity with wrong subscription", + requestTokenName: "test-token", + tokenSubscription: "user-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: identityResourceID("user-managed-test", "user-managed-test", defaultIdentityName), + tokenAzureResourceID: vmResourceID("user-managed-test", "user-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: defaultSubscription, + ResourceGroups: []string{"user-managed-test"}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: mockVerifyToken(nil), + certs: []*x509.Certificate{tlsConfig.Certificate}, + assertError: isAccessDenied, + }, + { + name: "user-managed identity with wrong resource group", + requestTokenName: "test-token", + tokenSubscription: "user-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: identityResourceID("user-managed-test", "user-managed-test", defaultIdentityName), + tokenAzureResourceID: vmResourceID("user-managed-test", "user-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: "user-managed-test", + ResourceGroups: []string{defaultResourceGroup}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: mockVerifyToken(nil), + certs: 
[]*x509.Certificate{tlsConfig.Certificate}, + assertError: isAccessDenied, + }, + { + name: "user-managed identity from different subscription", + requestTokenName: "test-token", + tokenSubscription: "user-managed-test", + tokenVMID: defaultVMID, + tokenManagedIdentityResourceID: identityResourceID("invalid-user-managed-test", "invalid-user-managed-test", defaultIdentityName), + tokenAzureResourceID: vmResourceID("user-managed-test", "user-managed-test", defaultVMName), + tokenSpec: types.ProvisionTokenSpecV2{ + Roles: []types.SystemRole{types.RoleNode}, + Azure: &types.ProvisionTokenSpecV2Azure{ + Allow: []*types.ProvisionTokenSpecV2Azure_Rule{ + { + Subscription: "user-managed-test", + ResourceGroups: []string{"user-managed-test"}, + }, + }, + }, + JoinMethod: types.JoinMethodAzure, + }, + verify: mockVerifyToken(nil), + certs: []*x509.Certificate{tlsConfig.Certificate}, + assertError: require.NoError, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + token, err := types.NewProvisionTokenFromSpec( + "test-token", + time.Now().Add(time.Minute), + tc.tokenSpec) + require.NoError(t, err) + require.NoError(t, a.UpsertToken(ctx, token)) + t.Cleanup(func() { + require.NoError(t, a.DeleteToken(ctx, token.GetName())) + }) + + mirID := tc.tokenManagedIdentityResourceID + azrID := tc.tokenAzureResourceID + accessToken, err := makeToken(mirID, azrID, a.clock.Now()) + require.NoError(t, err) + + vmClient := &mockAzureVMClient{ + vms: map[string]*azure.VirtualMachine{}, + } + getVMClient := makeVMClientGetter(map[string]*mockAzureVMClient{ + defaultSubscription: vmClient, + }) + + _, err = a.RegisterUsingAzureMethodWithOpts(context.Background(), func(challenge string) (*proto.RegisterUsingAzureMethodRequest, error) { + cfg := &azureChallengeResponseConfig{Challenge: challenge} + for _, opt := range tc.challengeResponseOptions { + opt(cfg) + } + + ad := attestedData{ + Nonce: cfg.Challenge, + SubscriptionID: tc.tokenSubscription, + ID: 
tc.tokenVMID, + } + adBytes, err := json.Marshal(&ad) + require.NoError(t, err) + s, err := pkcs7.NewSignedData(adBytes) + require.NoError(t, err) + require.NoError(t, s.AddSigner(tlsConfig.Certificate, pkey, pkcs7.SignerInfoConfig{})) + signature, err := s.Finish() + require.NoError(t, err) + signedAD := signedAttestedData{ + Encoding: "pkcs7", + Signature: base64.StdEncoding.EncodeToString(signature), + } + signedADBytes, err := json.Marshal(&signedAD) + require.NoError(t, err) + + req := &proto.RegisterUsingAzureMethodRequest{ + RegisterUsingTokenRequest: &types.RegisterUsingTokenRequest{ + Token: tc.requestTokenName, + HostID: "test-node", + Role: types.RoleNode, + PublicSSHKey: sshPublicKey, + PublicTLSKey: tlsPublicKey, + }, + AttestedData: signedADBytes, + AccessToken: accessToken, + } + return req, tc.challengeResponseErr + }, withCerts(tc.certs), withVerifyFunc(tc.verify), withVMClientGetter(getVMClient)) + tc.assertError(t, err) + }) + } +} From 17edb10e43e7fb8afaa12ccdf313fe0df1269e1f Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 9 Jan 2025 15:35:14 -0500 Subject: [PATCH 33/45] Permit routing to agentless nodes with non-UUID metadata.name (#50915) We suggest that a UUID is used for agentless nodes metadata.name field, but we do not enforce it. This causes several edge cases and slightly weird UX in places that expect the name to be a UUID. Most notably, this presents dialing problems for the web ui as described in https://github.com/gravitational/teleport/issues/50914. To allow dialing to function in all cases for these servers, routing has been updated to permit matches on metadata.name, however, the match is given a lower score than a match on a UUID. This should permit dialing, though, it may still result in ambiguity. Closes #50914. 
--- api/utils/route.go | 8 +++++++ api/utils/route_test.go | 15 ++++++++++++- lib/proxy/router_test.go | 47 +++++++++++++++++++++++++++++++--------- 3 files changed, 59 insertions(+), 11 deletions(-) diff --git a/api/utils/route.go b/api/utils/route.go index 1ca83926e4ee1..af9311758e1ea 100644 --- a/api/utils/route.go +++ b/api/utils/route.go @@ -180,6 +180,14 @@ func (m *SSHRouteMatcher) RouteToServerScore(server RouteableServer) (score int) score = max(score, matchAddr(addr)) } + // Allow a match on name, even though it may not be a UUID or EC2 ID, + // to support agentless hosts that were created without their name being a UUID. + // The score however, is lower than a true direct match, to prevent any + // breaking changes to routing. + if server.GetName() == m.cfg.Host { + score = max(score, indirectMatch) + } + return score } diff --git a/api/utils/route_test.go b/api/utils/route_test.go index ce971e04e6ea3..2ab21fd3df79c 100644 --- a/api/utils/route_test.go +++ b/api/utils/route_test.go @@ -15,6 +15,7 @@ package utils import ( + "cmp" "context" "testing" @@ -273,6 +274,7 @@ func TestSSHRouteMatcherScoring(t *testing.T) { tts := []struct { desc string hostname string + name string addrs []string score int }{ @@ -309,12 +311,23 @@ func TestSSHRouteMatcherScoring(t *testing.T) { }, score: notMatch, }, + { + desc: "non-uuid name", + name: "foo.example.com", + hostname: "foo.com", + addrs: []string{ + "0.0.0.0:0", + "1.1.1.1:0", + }, + score: indirectMatch, + }, } for _, tt := range tts { t.Run(tt.desc, func(t *testing.T) { + name := cmp.Or(tt.name, uuid.NewString()) score := matcher.RouteToServerScore(mockRouteableServer{ - name: uuid.NewString(), + name: name, hostname: tt.hostname, publicAddr: tt.addrs, }) diff --git a/lib/proxy/router_test.go b/lib/proxy/router_test.go index e83ca44a2811d..d18b3ce11663a 100644 --- a/lib/proxy/router_test.go +++ b/lib/proxy/router_test.go @@ -133,6 +133,11 @@ func TestRouteScoring(t *testing.T) { hostname: "blue.example.com", 
addr: "2.3.4.5:22", }, + { + name: "not-a-uuid", + hostname: "test.example.com", + addr: "3.4.5.6:22", + }, }) // scoring behavior is independent of routing strategy so we just @@ -205,6 +210,11 @@ func TestRouteScoring(t *testing.T) { host: "red.example.com", expect: "blue.example.com", }, + { + desc: "non-uuid name", + host: "not-a-uuid", + expect: "test.example.com", + }, } for _, tt := range tts { @@ -326,6 +336,21 @@ func TestGetServers(t *testing.T) { }, }) + servers = append(servers, + &types.ServerV2{ + Kind: types.KindNode, + SubKind: types.SubKindOpenSSHNode, + Version: types.V2, + Metadata: types.Metadata{ + Name: "agentless-node-1", + }, + Spec: types.ServerSpecV2{ + Addr: "1.2.3.4:22", + Hostname: "agentless-1", + }, + }, + ) + // ensure tests don't have order-dependence rand.Shuffle(len(servers), func(i, j int) { servers[i], servers[j] = servers[j], servers[i] @@ -432,15 +457,6 @@ func TestGetServers(t *testing.T) { require.Equal(t, "alpaca", srv.GetHostname()) }, }, - { - name: "failure on invalid addresses", - site: testSite{cfg: &unambiguousCfg, nodes: servers}, - host: "lion", - errAssertion: require.NoError, - serverAssertion: func(t *testing.T, srv types.Server) { - require.Empty(t, srv) - }, - }, { name: "case-insensitive match", site: testSite{cfg: &unambiguousInsensitiveCfg, nodes: servers}, @@ -462,6 +478,17 @@ func TestGetServers(t *testing.T) { require.Empty(t, srv) }, }, + { + name: "agentless match by non-uuid name", + site: testSite{cfg: &unambiguousCfg, nodes: servers}, + host: "agentless-node-1", + errAssertion: require.NoError, + serverAssertion: func(t *testing.T, srv types.Server) { + require.NotNil(t, srv) + require.Equal(t, "agentless-1", srv.GetHostname()) + require.True(t, srv.IsOpenSSHNode()) + }, + }, } ctx := context.Background() @@ -625,7 +652,7 @@ func TestRouter_DialHost(t *testing.T) { SubKind: types.SubKindOpenSSHNode, Version: types.V2, Metadata: types.Metadata{ - Name: uuid.NewString(), + Name: "agentless", }, Spec: 
types.ServerSpecV2{ Addr: "127.0.0.1:9001", From e46117a5b1ce7f8c4445868f377b870831a6bb93 Mon Sep 17 00:00:00 2001 From: Lisa Kim Date: Thu, 9 Jan 2025 13:37:50 -0800 Subject: [PATCH 34/45] Fix enterprise webassets path for testing (#50927) --- lib/web/apiserver_test_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/web/apiserver_test_utils.go b/lib/web/apiserver_test_utils.go index 9e6fff840b514..9a98415a504ba 100644 --- a/lib/web/apiserver_test_utils.go +++ b/lib/web/apiserver_test_utils.go @@ -34,7 +34,7 @@ func NewDebugFileSystem(isEnterprise bool) (http.FileSystem, error) { assetsPath := "../../webassets/teleport" if isEnterprise { - assetsPath = "../../../webassets/teleport" + assetsPath = "../../../webassets/e/teleport" } // Ensure we have the built assets available before continuing. From 80732b4b22bfc9b5d3704f17387e20e6b385c205 Mon Sep 17 00:00:00 2001 From: Lisa Kim Date: Thu, 9 Jan 2025 13:38:07 -0800 Subject: [PATCH 35/45] WebDiscover: allow setting resource labels when enrolling single eks, rds, server, kube (#50606) --- .../CreateDatabase/CreateDatabase.tsx | 15 +- .../ManualDeploy/ManualDeploy.tsx | 12 +- .../EnrollRdsDatabase/SingleEnrollment.tsx | 87 +++++-- .../Database/MutualTls/useMutualTls.ts | 6 +- .../EnrollEKSCluster/Dialogs.story.tsx | 2 +- .../EnrollEKSCluster/EnrollEksCluster.tsx | 134 ++++++++--- .../EnrollEKSCluster/ManualHelmDialog.tsx | 21 +- .../HelmChart/HelmChart.story.tsx | 0 .../HelmChart/HelmChart.test.tsx | 0 .../{ => SelfHosted}/HelmChart/HelmChart.tsx | 136 ++++++++--- .../{ => SelfHosted}/HelmChart/index.ts | 4 +- .../Discover/Kubernetes/SelfHosted/index.ts | 19 ++ .../src/Discover/Kubernetes/index.tsx | 2 +- .../DownloadScript/DownloadScript.story.tsx | 10 +- .../Server/DownloadScript/DownloadScript.tsx | 215 +++++++++++++----- .../Shared/LabelsCreater/LabelsCreater.tsx | 2 +- .../Discover/Shared/PingTeleportContext.tsx | 4 + .../Discover/Shared/useJoinTokenSuspender.ts | 25 +- 18 files changed, 
513 insertions(+), 181 deletions(-) rename web/packages/teleport/src/Discover/Kubernetes/{ => SelfHosted}/HelmChart/HelmChart.story.tsx (100%) rename web/packages/teleport/src/Discover/Kubernetes/{ => SelfHosted}/HelmChart/HelmChart.test.tsx (100%) rename web/packages/teleport/src/Discover/Kubernetes/{ => SelfHosted}/HelmChart/HelmChart.tsx (80%) rename web/packages/teleport/src/Discover/Kubernetes/{ => SelfHosted}/HelmChart/index.ts (89%) create mode 100644 web/packages/teleport/src/Discover/Kubernetes/SelfHosted/index.ts diff --git a/web/packages/teleport/src/Discover/Database/CreateDatabase/CreateDatabase.tsx b/web/packages/teleport/src/Discover/Database/CreateDatabase/CreateDatabase.tsx index c741944ebf6bc..79bceda4fcae4 100644 --- a/web/packages/teleport/src/Discover/Database/CreateDatabase/CreateDatabase.tsx +++ b/web/packages/teleport/src/Discover/Database/CreateDatabase/CreateDatabase.tsx @@ -25,6 +25,7 @@ import TextEditor from 'shared/components/TextEditor'; import Validation, { Validator } from 'shared/components/Validation'; import { requiredField } from 'shared/components/Validation/rules'; +import { ResourceLabelTooltip } from 'teleport/Discover/Shared/ResourceLabelTooltip'; import type { ResourceLabel } from 'teleport/services/agents'; import { @@ -162,13 +163,13 @@ export function CreateDatabaseView({ /> - Labels (optional) - - Labels make this new database discoverable by the database - service.
- Not defining labels is equivalent to asterisks (any - database service can discover this database). -
+ + Labels (optional) + + (agentMeta.resourceName); const showHint = useShowHint(active); + useEffect(() => { + return () => clearCachedJoinTokenResult([ResourceKind.Database]); + }, []); + function handleNextStep() { updateAgentMeta({ ...agentMeta, diff --git a/web/packages/teleport/src/Discover/Database/EnrollRdsDatabase/SingleEnrollment.tsx b/web/packages/teleport/src/Discover/Database/EnrollRdsDatabase/SingleEnrollment.tsx index eba8893130f3e..d60f8a992535a 100644 --- a/web/packages/teleport/src/Discover/Database/EnrollRdsDatabase/SingleEnrollment.tsx +++ b/web/packages/teleport/src/Discover/Database/EnrollRdsDatabase/SingleEnrollment.tsx @@ -18,13 +18,16 @@ import { useEffect, useState } from 'react'; -import { Text } from 'design'; +import { Flex, Subtitle1, Text } from 'design'; import { FetchStatus } from 'design/DataTable/types'; +import Validation, { Validator } from 'shared/components/Validation'; import { Attempt } from 'shared/hooks/useAttemptNext'; import { getErrMessage } from 'shared/utils/errorType'; import { getRdsEngineIdentifier } from 'teleport/Discover/SelectResource/types'; +import { ResourceLabelTooltip } from 'teleport/Discover/Shared/ResourceLabelTooltip'; import { useDiscover } from 'teleport/Discover/useDiscover'; +import { ResourceLabel } from 'teleport/services/agents'; import { Database } from 'teleport/services/databases'; import { AwsRdsDatabase, @@ -33,7 +36,7 @@ import { Vpc, } from 'teleport/services/integrations'; -import { ActionButtons } from '../../Shared'; +import { ActionButtons, LabelsCreater } from '../../Shared'; import { CreateDatabaseDialog } from '../CreateDatabase/CreateDatabaseDialog'; import { useCreateDatabase } from '../CreateDatabase/useCreateDatabase'; import { DatabaseList } from './RdsDatabaseList'; @@ -90,6 +93,7 @@ export function SingleEnrollment({ const [tableData, setTableData] = useState(); const [selectedDb, setSelectedDb] = useState(); + const [customLabels, setCustomLabels] = useState([]); 
useEffect(() => { if (vpc) { @@ -98,6 +102,12 @@ export function SingleEnrollment({ } }, [vpc]); + function onSelectRds(rds: CheckedAwsRdsDatabase) { + // when changing selected db, clear defined labels + setCustomLabels([]); + setSelectedDb(rds); + } + function fetchNextPage() { fetchRdsDatabases({ ...tableData }, vpc); } @@ -175,6 +185,17 @@ export function SingleEnrollment({ } } + function handleOnProceedWithValidation( + validator: Validator, + { overwriteDb = false } = {} + ) { + if (!validator.validate()) { + return; + } + + handleOnProceed({ overwriteDb }); + } + function handleOnProceed({ overwriteDb = false } = {}) { // Corner case where if registering db fails a user can: // 1) change region, which will list new databases or @@ -185,7 +206,9 @@ export function SingleEnrollment({ name: selectedDb.name, protocol: selectedDb.engine, uri: selectedDb.uri, - labels: selectedDb.labels, + // The labels from the `selectedDb` are AWS tags which + // will be imported as is. + labels: [...selectedDb.labels, ...customLabels], awsRds: selectedDb, awsRegion: region, awsVpcId: vpc.id, @@ -198,23 +221,47 @@ export function SingleEnrollment({ return ( <> - {showTable && ( - <> - Select an RDS database to enroll: - - - )} - + + {({ validator }) => ( + <> + {showTable && ( + <> + Select an RDS database to enroll: + + {selectedDb && ( + <> + + Optionally Add More Labels + + + + + )} + + )} + handleOnProceedWithValidation(validator)} + disableProceed={disableBtns || !showTable || !selectedDb} + /> + + )} + {attempt.status !== '' && ( (null); + const [customLabels, setCustomLabels] = useState([]); const ctx = useTeleport(); + function onSelectCluster(eks: CheckedEksCluster) { + // when changing selected cluster, clear defined labels + setCustomLabels([]); + setSelectedCluster(eks); + } + + function clearSelectedCluster() { + setSelectedCluster(null); + setCustomLabels([]); + } + function fetchClustersWithNewRegion(region: Regions) { setSelectedRegion(region); // Clear table 
when fetching with new region. @@ -148,7 +164,7 @@ export function EnrollEksCluster(props: AgentStepProps) { } function refreshClustersList() { - setSelectedCluster(null); + clearSelectedCluster(); // When refreshing, start the table back at page 1. fetchClusters({ ...tableData, startKey: '', items: [] }); } @@ -214,9 +230,7 @@ export function EnrollEksCluster(props: AgentStepProps) { if (tableData.items.length > 0) { setTableData(emptyTableData); } - if (selectedCluster) { - setSelectedCluster(null); - } + clearSelectedCluster(); setEnrollmentState({ status: 'notStarted' }); } @@ -279,6 +293,21 @@ export function EnrollEksCluster(props: AgentStepProps) { } as EksMeta); } + function showManualHelmDialog(validator: Validator) { + if (!validator.validate()) { + return; + } + + setIsManualHelmDialogShown(true); + } + + async function enrollWithValidation(validator: Validator) { + if (!validator.validate()) { + return; + } + return enroll(); + } + async function enroll() { const integrationName = (agentMeta as EksMeta).awsIntegration.name; setEnrollmentState({ status: 'enrolling' }); @@ -290,6 +319,7 @@ export function EnrollEksCluster(props: AgentStepProps) { region: selectedRegion, enableAppDiscovery: isAppDiscoveryEnabled, clusterNames: [selectedCluster.name], + extraLabels: customLabels, } ); @@ -380,7 +410,14 @@ export function EnrollEksCluster(props: AgentStepProps) { isCloud: ctx.isCloud, automaticUpgradesEnabled: ctx.automaticUpgradesEnabled, automaticUpgradesTargetVersion: ctx.automaticUpgradesTargetVersion, - joinLabels: [...selectedCluster.labels, ...selectedCluster.joinLabels], + // The labels from the `selectedCluster` are AWS tags which + // will be imported as is. `joinLabels` are internal Teleport labels + // added to each cluster when listing clusters. 
+ joinLabels: [ + ...selectedCluster.labels, + ...selectedCluster.joinLabels, + ...customLabels, + ], disableAppDiscovery: !isAppDiscoveryEnabled, }); }, @@ -392,6 +429,7 @@ export function EnrollEksCluster(props: AgentStepProps) { ctx.storeUser.state.cluster, isAppDiscoveryEnabled, selectedCluster, + customLabels, ] ); @@ -457,7 +495,7 @@ export function EnrollEksCluster(props: AgentStepProps) { autoDiscovery={isAutoDiscoveryEnabled} fetchStatus={tableData.fetchStatus} selectedCluster={selectedCluster} - onSelectCluster={setSelectedCluster} + onSelectCluster={onSelectCluster} fetchNextPage={fetchNextPage} /> )} @@ -469,32 +507,60 @@ export function EnrollEksCluster(props: AgentStepProps) { /> )} {!isAutoDiscoveryEnabled && ( - - Automatically enroll selected EKS cluster - - - Enroll EKS Cluster - - - { - setIsManualHelmDialogShown(b => !b); - }} - > - Or enroll manually - - - - + + {({ validator }) => ( + <> + {selectedCluster && ( + <> + + Optionally Add More Labels + + + + + )} + + + Automatically enroll selected EKS cluster + + + enrollWithValidation(validator)} + disabled={enrollmentNotAllowed} + mt={2} + mb={2} + > + Enroll EKS Cluster + + + showManualHelmDialog(validator)} + > + Or enroll manually + + + + + + )} + )} {isAutoDiscoveryEnabled && ( { if (joinToken && !command) { setCommand(setJoinTokenAndGetCommand(joinToken)); } + + return () => clearCachedJoinTokenResult(resourceKinds); }, [joinToken, command, setJoinTokenAndGetCommand]); return ( diff --git a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.story.tsx b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.story.tsx similarity index 100% rename from web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.story.tsx rename to web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.story.tsx diff --git a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.test.tsx 
b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.test.tsx similarity index 100% rename from web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.test.tsx rename to web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.test.tsx diff --git a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.tsx similarity index 80% rename from web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx rename to web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.tsx index ef91a9ff0160d..454e7bd8f57a4 100644 --- a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/HelmChart.tsx +++ b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/HelmChart.tsx @@ -16,10 +16,19 @@ * along with this program. If not, see . */ -import { Suspense, useState } from 'react'; +import { Suspense, useEffect, useState } from 'react'; import styled from 'styled-components'; -import { Box, ButtonSecondary, H3, Link, Mark, Subtitle3, Text } from 'design'; +import { + Box, + ButtonSecondary, + Flex, + H3, + Link, + Mark, + Subtitle3, + Text, +} from 'design'; import * as Icons from 'design/Icon'; import { P } from 'design/Text/Text'; import FieldInput from 'shared/components/FieldInput'; @@ -35,6 +44,7 @@ import { WaitingInfo, } from 'teleport/Discover/Shared/HintBox'; import { usePingTeleport } from 'teleport/Discover/Shared/PingTeleportContext'; +import { ResourceLabelTooltip } from 'teleport/Discover/Shared/ResourceLabelTooltip'; import { clearCachedJoinTokenResult, useJoinTokenSuspender, @@ -49,16 +59,18 @@ import { ActionButtons, Header, HeaderSubtitle, + LabelsCreater, ResourceKind, TextIcon, useShowHint, -} from '../../Shared'; -import type { AgentStepProps } from '../../types'; +} from '../../../Shared'; +import type { AgentStepProps } from '../../../types'; export default function 
Container(props: AgentStepProps) { const [namespace, setNamespace] = useState(''); const [clusterName, setClusterName] = useState(''); const [showHelmChart, setShowHelmChart] = useState(false); + const [labels, setLabels] = useState([]); return ( // This outer CatchError and Suspense handles @@ -82,6 +94,9 @@ export default function Container(props: AgentStepProps) { setNamespace={setNamespace} clusterName={clusterName} setClusterName={setClusterName} + labels={labels} + onChangeLabels={setLabels} + generateScript={fallbackProps.retry} /> null} @@ -102,6 +117,9 @@ export default function Container(props: AgentStepProps) { setNamespace={setNamespace} clusterName={clusterName} setClusterName={setClusterName} + labels={labels} + onChangeLabels={setLabels} + processing={true} /> null} @@ -122,6 +140,8 @@ export default function Container(props: AgentStepProps) { setNamespace={setNamespace} clusterName={clusterName} setClusterName={setClusterName} + labels={labels} + onChangeLabels={setLabels} /> null} @@ -138,6 +158,8 @@ export default function Container(props: AgentStepProps) { setNamespace={setNamespace} clusterName={clusterName} setClusterName={setClusterName} + labels={labels} + onChangeLabels={setLabels} /> )} @@ -145,6 +167,12 @@ export default function Container(props: AgentStepProps) { ); } +const resourceKinds = [ + ResourceKind.Kubernetes, + ResourceKind.Application, + ResourceKind.Discovery, +]; + export function HelmChart( props: AgentStepProps & { onEdit: () => void; @@ -152,26 +180,33 @@ export function HelmChart( setNamespace(n: string): void; clusterName: string; setClusterName(c: string): void; + labels: ResourceLabel[]; + onChangeLabels(l: ResourceLabel[]): void; } ) { - const { joinToken, reloadJoinToken } = useJoinTokenSuspender([ - ResourceKind.Kubernetes, - ResourceKind.Application, - ResourceKind.Discovery, - ]); + const { joinToken, reloadJoinToken } = useJoinTokenSuspender({ + resourceKinds, + suggestedLabels: props.labels, + }); + + 
useEffect(() => { + return () => clearCachedJoinTokenResult(resourceKinds); + }); return ( props.onEdit()} generateScript={reloadJoinToken} namespace={props.namespace} setNamespace={props.setNamespace} clusterName={props.clusterName} setClusterName={props.setClusterName} + labels={props.labels} + onChangeLabels={props.onChangeLabels} /> ); @@ -233,8 +269,11 @@ const StepTwo = ({ setClusterName, error, generateScript, - disabled, + showHelmChart, onEdit, + labels, + onChangeLabels, + processing, }: { error?: Error; generateScript?(): void; @@ -242,11 +281,19 @@ const StepTwo = ({ setNamespace(n: string): void; clusterName: string; setClusterName(c: string): void; - disabled?: boolean; + showHelmChart?: boolean; + processing?: boolean; onEdit: () => void; + labels: ResourceLabel[]; + onChangeLabels(l: ResourceLabel[]): void; }) => { - function handleSubmit(validator: Validator) { - if (!validator.validate()) { + const disabled = showHelmChart || processing; + + function handleSubmit( + inputFieldValidator: Validator, + labelsValidator: Validator + ) { + if (!inputFieldValidator.validate() || !labelsValidator.validate()) { return; } generateScript(); @@ -262,7 +309,7 @@ const StepTwo = ({ - {({ validator }) => ( + {({ validator: inputFieldValidator }) => ( <> setClusterName(e.target.value)} /> - {disabled ? ( - onEdit()} - > - Edit - - ) : ( - handleSubmit(validator)} - > - Next - - )} + + Add Labels (Optional) + + + + {({ validator: labelsValidator }) => ( + <> + + + + {showHelmChart ? 
( + onEdit()} + > + Edit + + ) : ( + + handleSubmit(inputFieldValidator, labelsValidator) + } + disabled={processing} + > + Generate Command + + )} + + )} + )} @@ -391,6 +460,7 @@ const InstallHelmChart = ({ nextStep, prevStep, updateAgentMeta, + labels, }: { namespace: string; clusterName: string; @@ -398,6 +468,7 @@ const InstallHelmChart = ({ nextStep(): void; prevStep(): void; updateAgentMeta(a: AgentMeta): void; + labels: ResourceLabel[]; }) => { const ctx = useTeleport(); @@ -477,6 +548,7 @@ const InstallHelmChart = ({ isCloud: ctx.isCloud, automaticUpgradesEnabled: ctx.automaticUpgradesEnabled, automaticUpgradesTargetVersion: ctx.automaticUpgradesTargetVersion, + joinLabels: labels, }); return ( diff --git a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/index.ts b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/index.ts similarity index 89% rename from web/packages/teleport/src/Discover/Kubernetes/HelmChart/index.ts rename to web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/index.ts index 0239113fe6f88..b995808c6f4e1 100644 --- a/web/packages/teleport/src/Discover/Kubernetes/HelmChart/index.ts +++ b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/HelmChart/index.ts @@ -16,6 +16,6 @@ * along with this program. If not, see . */ -import HelmChart from './HelmChart'; +import HelmChart, { generateCmd } from './HelmChart'; -export { HelmChart }; +export { HelmChart, generateCmd }; diff --git a/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/index.ts b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/index.ts new file mode 100644 index 0000000000000..6c6ef4aff54b0 --- /dev/null +++ b/web/packages/teleport/src/Discover/Kubernetes/SelfHosted/index.ts @@ -0,0 +1,19 @@ +/** + * Teleport + * Copyright (C) 2024 Gravitational, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +export * from './HelmChart'; diff --git a/web/packages/teleport/src/Discover/Kubernetes/index.tsx b/web/packages/teleport/src/Discover/Kubernetes/index.tsx index f5b668cab05b4..81595fd4d2282 100644 --- a/web/packages/teleport/src/Discover/Kubernetes/index.tsx +++ b/web/packages/teleport/src/Discover/Kubernetes/index.tsx @@ -24,8 +24,8 @@ import { KubeLocation, ResourceSpec } from 'teleport/Discover/SelectResource'; import { AwsAccount, Finished, ResourceKind } from 'teleport/Discover/Shared'; import { DiscoverEvent } from 'teleport/services/userEvent'; -import { HelmChart } from './HelmChart'; import { KubeWrapper } from './KubeWrapper'; +import { HelmChart } from './SelfHosted'; import { SetupAccess } from './SetupAccess'; import { TestConnection } from './TestConnection'; diff --git a/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx b/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx index 7dad3e0ec67de..277c05492f7ac 100644 --- a/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx +++ b/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.story.tsx @@ -72,7 +72,7 @@ export const Polling: StoryObj = { render() { return ( - + null} /> ); }, @@ -95,7 +95,7 @@ export const PollingSuccess: StoryObj = { 
render() { return ( - + null} /> ); }, @@ -120,7 +120,7 @@ export const PollingError: StoryObj = { render() { return ( - + null} /> ); }, @@ -139,7 +139,7 @@ export const Processing: StoryObj = { render() { return ( - + null} /> ); }, @@ -163,7 +163,7 @@ export const Failed: StoryObj = { render() { return ( - + null} /> ); }, diff --git a/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.tsx b/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.tsx index 9ce6b53edcb55..7e8de453f7f73 100644 --- a/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.tsx +++ b/web/packages/teleport/src/Discover/Server/DownloadScript/DownloadScript.tsx @@ -16,34 +16,38 @@ * along with this program. If not, see . */ -import React, { Suspense, useEffect, useState } from 'react'; +import { Suspense, useEffect, useState } from 'react'; -import { Box, Indicator, Mark, Text } from 'design'; +import { Box, ButtonSecondary, Flex, Mark, Text } from 'design'; import * as Icons from 'design/Icon'; -import { P } from 'design/Text/Text'; +import { H3, Subtitle3 } from 'design/Text/Text'; +import Validation, { Validator } from 'shared/components/Validation'; import { CatchError } from 'teleport/components/CatchError'; import { TextSelectCopyMulti } from 'teleport/components/TextSelectCopy'; import cfg from 'teleport/config'; -import { CommandBox } from 'teleport/Discover/Shared/CommandBox'; import { HintBox, SuccessBox, WaitingInfo, } from 'teleport/Discover/Shared/HintBox'; import { usePingTeleport } from 'teleport/Discover/Shared/PingTeleportContext'; +import { ResourceLabelTooltip } from 'teleport/Discover/Shared/ResourceLabelTooltip/ResourceLabelTooltip'; import { clearCachedJoinTokenResult, useJoinTokenSuspender, } from 'teleport/Discover/Shared/useJoinTokenSuspender'; +import { ResourceLabel } from 'teleport/services/agents'; import { JoinToken } from 'teleport/services/joinToken'; -import type { Node } from 
'teleport/services/nodes'; +import { Node } from 'teleport/services/nodes'; import { ActionButtons, Header, HeaderSubtitle, + LabelsCreater, ResourceKind, + StyledBox, TextIcon, } from '../../Shared'; import { AgentStepProps } from '../../types'; @@ -51,38 +55,148 @@ import { AgentStepProps } from '../../types'; const SHOW_HINT_TIMEOUT = 1000 * 60 * 5; // 5 minutes export default function Container(props: AgentStepProps) { + const [labels, setLabels] = useState([]); + const [showScript, setShowScript] = useState(false); + + function toggleShowScript(validator: Validator) { + if (!validator.validate()) { + return; + } + setShowScript(!showScript); + } + + const commonProps = { + labels, + onChangeLabels: setLabels, + showScript, + onShowScript: toggleShowScript, + onPrev: props.prevStep, + }; + return ( clearCachedJoinTokenResult([ResourceKind.Server])} fallbackFn={fbProps => ( - + <> + + + )} > - -
+ <> + + + } > - + + + {showScript && } ); } -export function DownloadScript(props: AgentStepProps) { +const Heading = () => ( + <> +
Configure Resource
+ + Install and configure the Teleport SSH Service + + +); + +export function StepOne({ + labels, + onChangeLabels, + showScript, + onShowScript, + error, + processing = false, + onPrev, +}: { + labels: ResourceLabel[]; + onChangeLabels(l: ResourceLabel[]): void; + showScript: boolean; + onShowScript(validator: Validator): void; + error?: Error; + processing?: boolean; + onPrev(): void; +}) { + const nextLabelTxt = labels.length + ? 'Finish Adding Labels' + : 'Skip Adding Labels'; + return ( + <> + +
+

Step 1 (Optional)

+ + Add Labels + + +
+ + {({ validator }) => ( + <> + + {error && ( + + + Encountered Error: {error.message} + + )} + + onShowScript(validator)} + disabled={processing} + > + {showScript && !error ? 'Edit Labels' : nextLabelTxt} + + + + )} + +
+ {(!showScript || processing || error) && ( + null} + disableProceed={true} + onPrev={onPrev} + /> + )} + + ); +} + +export function StepTwoWithActionBtns( + props: AgentStepProps & { labels: ResourceLabel[] } +) { // Fetches join token. - const { joinToken } = useJoinTokenSuspender([ResourceKind.Server]); + const { joinToken } = useJoinTokenSuspender({ + resourceKinds: [ResourceKind.Server], + suggestedLabels: props.labels, + }); // Starts resource querying interval. const { result, active } = usePingTeleport(joinToken); @@ -92,7 +206,10 @@ export function DownloadScript(props: AgentStepProps) { if (active) { const id = window.setTimeout(() => setShowHint(true), SHOW_HINT_TIMEOUT); - return () => window.clearTimeout(id); + return () => { + window.clearTimeout(id); + clearCachedJoinTokenResult([ResourceKind.Server]); + }; } }, [active]); @@ -153,17 +270,22 @@ export function DownloadScript(props: AgentStepProps) { return ( <> -
Configure Resource
- - Install and configure the Teleport Service - -

Run the following command on the server you want to add.

- - - - {hint} + {joinToken && ( + <> + +
+

Step 2

+ + Run the following command on the server you want to add + +
+ +
+ {hint} + + )} { - return ( - <> -
Configure Resource
- - Install and configure the Teleport Service. -
- Run the following command on the server you want to add. -
- {children} - - - ); -}; - function createBashCommand(tokenId: string) { return `sudo bash -c "$(curl -fsSL ${cfg.getNodeScriptUrl(tokenId)})"`; } diff --git a/web/packages/teleport/src/Discover/Shared/LabelsCreater/LabelsCreater.tsx b/web/packages/teleport/src/Discover/Shared/LabelsCreater/LabelsCreater.tsx index dbf829e911fdd..1e733540d3c84 100644 --- a/web/packages/teleport/src/Discover/Shared/LabelsCreater/LabelsCreater.tsx +++ b/web/packages/teleport/src/Discover/Shared/LabelsCreater/LabelsCreater.tsx @@ -189,7 +189,7 @@ export function LabelsCreater({ })} { active: boolean; start: (tokenOrTerm: JoinToken | string) => void; result: T | null; + stop: () => void; } const pingTeleportContext = @@ -117,6 +118,7 @@ export function PingTeleportProvider(props: { active, start, result, + stop: () => setActive(false), }} > {props.children} @@ -137,6 +139,8 @@ export function usePingTeleport(tokenOrTerm: JoinToken | string) { if (!ctx.active && !ctx.result) { ctx.start(tokenOrTerm); } + + return () => ctx.stop(); }, []); return ctx; diff --git a/web/packages/teleport/src/Discover/Shared/useJoinTokenSuspender.ts b/web/packages/teleport/src/Discover/Shared/useJoinTokenSuspender.ts index 18e7f9e3f007c..f82c9d4d64a6d 100644 --- a/web/packages/teleport/src/Discover/Shared/useJoinTokenSuspender.ts +++ b/web/packages/teleport/src/Discover/Shared/useJoinTokenSuspender.ts @@ -41,11 +41,25 @@ export function clearCachedJoinTokenResult(resourceKinds: ResourceKind[]) { joinTokenCache.delete(resourceKinds.sort().join()); } -export function useJoinTokenSuspender( - resourceKinds: ResourceKind[], - suggestedAgentMatcherLabels: ResourceLabel[] = [], - joinMethod: JoinMethod = 'token' -): { +export function useJoinTokenSuspender({ + resourceKinds, + suggestedAgentMatcherLabels = [], + joinMethod = 'token', + suggestedLabels = [], +}: { + resourceKinds: ResourceKind[]; + /** + * labels used for the agent that will be created + * using a join token (eg: db agent) + */ + 
suggestedAgentMatcherLabels?: ResourceLabel[]; + joinMethod?: JoinMethod; + /** + * labels for a non-agent resource that will be created + * using a join token (currently only can be applied to server resource kind). + */ + suggestedLabels?: ResourceLabel[]; +}): { joinToken: JoinToken; reloadJoinToken: () => void; } { @@ -68,6 +82,7 @@ export function useJoinTokenSuspender( roles: resourceKinds.map(resourceKindToJoinRole), method: joinMethod, suggestedAgentMatcherLabels, + suggestedLabels, }, abortController.signal ) From a26c2a94e85d7a1c53d80e3999f7274392e1fec8 Mon Sep 17 00:00:00 2001 From: Forrest <30576607+fspmarshall@users.noreply.github.com> Date: Thu, 9 Jan 2025 14:30:37 -0800 Subject: [PATCH 36/45] add ssh identity object (#50787) --- lib/auth/auth.go | 68 ++-- lib/auth/keygen/keygen.go | 176 +++------- lib/auth/keygen/keygen_test.go | 34 +- lib/auth/test/suite.go | 158 +++++---- lib/auth/testauthority/testauthority.go | 3 +- lib/client/client_store_test.go | 23 +- lib/client/cluster_client_test.go | 10 +- lib/client/identityfile/identity_test.go | 10 +- lib/client/keyagent_test.go | 23 +- lib/reversetunnel/srv_test.go | 13 +- lib/services/authority.go | 99 ------ lib/srv/authhandlers_test.go | 30 +- lib/sshca/identity.go | 392 +++++++++++++++++++++++ lib/sshca/identity_test.go | 97 ++++++ lib/sshca/sshca.go | 37 ++- 15 files changed, 770 insertions(+), 403 deletions(-) create mode 100644 lib/sshca/identity.go create mode 100644 lib/sshca/identity_test.go diff --git a/lib/auth/auth.go b/lib/auth/auth.go index 82bd49e68befb..aef1a77ed2564 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -3241,39 +3241,41 @@ func generateCert(ctx context.Context, a *Server, req certRequest, caType types. 
return nil, trace.Wrap(err) } - params := services.UserCertParams{ - CASigner: sshSigner, - PublicUserKey: req.sshPublicKey, - Username: req.user.GetName(), - Impersonator: req.impersonator, - AllowedLogins: allowedLogins, - TTL: sessionTTL, - Roles: req.checker.RoleNames(), - CertificateFormat: certificateFormat, - PermitPortForwarding: req.checker.CanPortForward(), - PermitAgentForwarding: req.checker.CanForwardAgents(), - PermitX11Forwarding: req.checker.PermitX11Forwarding(), - RouteToCluster: req.routeToCluster, - Traits: req.traits, - ActiveRequests: req.activeRequests, - MFAVerified: req.mfaVerified, - PreviousIdentityExpires: req.previousIdentityExpires, - LoginIP: req.loginIP, - PinnedIP: pinnedIP, - DisallowReissue: req.disallowReissue, - Renewable: req.renewable, - Generation: req.generation, - BotName: req.botName, - BotInstanceID: req.botInstanceID, - CertificateExtensions: req.checker.CertificateExtensions(), - AllowedResourceIDs: requestedResourcesStr, - ConnectionDiagnosticID: req.connectionDiagnosticID, - PrivateKeyPolicy: attestedKeyPolicy, - DeviceID: req.deviceExtensions.DeviceID, - DeviceAssetTag: req.deviceExtensions.AssetTag, - DeviceCredentialID: req.deviceExtensions.CredentialID, - GitHubUserID: githubUserID, - GitHubUsername: githubUsername, + params := sshca.UserCertificateRequest{ + CASigner: sshSigner, + PublicUserKey: req.sshPublicKey, + TTL: sessionTTL, + CertificateFormat: certificateFormat, + Identity: sshca.Identity{ + Username: req.user.GetName(), + Impersonator: req.impersonator, + AllowedLogins: allowedLogins, + Roles: req.checker.RoleNames(), + PermitPortForwarding: req.checker.CanPortForward(), + PermitAgentForwarding: req.checker.CanForwardAgents(), + PermitX11Forwarding: req.checker.PermitX11Forwarding(), + RouteToCluster: req.routeToCluster, + Traits: req.traits, + ActiveRequests: req.activeRequests, + MFAVerified: req.mfaVerified, + PreviousIdentityExpires: req.previousIdentityExpires, + LoginIP: req.loginIP, + PinnedIP: 
pinnedIP, + DisallowReissue: req.disallowReissue, + Renewable: req.renewable, + Generation: req.generation, + BotName: req.botName, + BotInstanceID: req.botInstanceID, + CertificateExtensions: req.checker.CertificateExtensions(), + AllowedResourceIDs: requestedResourcesStr, + ConnectionDiagnosticID: req.connectionDiagnosticID, + PrivateKeyPolicy: attestedKeyPolicy, + DeviceID: req.deviceExtensions.DeviceID, + DeviceAssetTag: req.deviceExtensions.AssetTag, + DeviceCredentialID: req.deviceExtensions.CredentialID, + GitHubUserID: githubUserID, + GitHubUsername: githubUsername, + }, } signedSSHCert, err = a.GenerateUserCert(params) if err != nil { diff --git a/lib/auth/keygen/keygen.go b/lib/auth/keygen/keygen.go index cd6bb0acb28ee..5f47b3a90ac16 100644 --- a/lib/auth/keygen/keygen.go +++ b/lib/auth/keygen/keygen.go @@ -23,7 +23,6 @@ import ( "crypto/rand" "fmt" "log/slog" - "strings" "time" "github.com/gravitational/trace" @@ -31,12 +30,11 @@ import ( "golang.org/x/crypto/ssh" "github.com/gravitational/teleport" - "github.com/gravitational/teleport/api/constants" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/api/types/wrappers" apiutils "github.com/gravitational/teleport/api/utils" "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/utils" ) @@ -129,164 +127,70 @@ func (k *Keygen) GenerateHostCertWithoutValidation(c services.HostCertParams) ([ // GenerateUserCert generates a user ssh certificate with the passed in parameters. // The private key of the CA to sign the certificate must be provided. 
-func (k *Keygen) GenerateUserCert(c services.UserCertParams) ([]byte, error) { - if err := c.CheckAndSetDefaults(); err != nil { - return nil, trace.Wrap(err, "error validating UserCertParams") +func (k *Keygen) GenerateUserCert(req sshca.UserCertificateRequest) ([]byte, error) { + if err := req.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err, "error validating user certificate request") } - return k.GenerateUserCertWithoutValidation(c) + return k.GenerateUserCertWithoutValidation(req) } // GenerateUserCertWithoutValidation generates a user ssh certificate with the // passed in parameters without validating them. -func (k *Keygen) GenerateUserCertWithoutValidation(c services.UserCertParams) ([]byte, error) { - pubKey, _, _, _, err := ssh.ParseAuthorizedKey(c.PublicUserKey) +func (k *Keygen) GenerateUserCertWithoutValidation(req sshca.UserCertificateRequest) ([]byte, error) { + pubKey, _, _, _, err := ssh.ParseAuthorizedKey(req.PublicUserKey) if err != nil { return nil, trace.Wrap(err) } - validBefore := uint64(ssh.CertTimeInfinity) - if c.TTL != 0 { - b := k.clock.Now().UTC().Add(c.TTL) - validBefore = uint64(b.Unix()) + + // create shallow copy of identity since we want to make some local changes + ident := req.Identity + + // since this method ignores the supplied values for ValidBefore/ValidAfter, avoid confusing by + // rejecting identities where they are set. 
+ if ident.ValidBefore != 0 { + return nil, trace.BadParameter("ValidBefore should not be set in calls to GenerateUserCert") + } + if ident.ValidAfter != 0 { + return nil, trace.BadParameter("ValidAfter should not be set in calls to GenerateUserCert") + } + + // calculate ValidBefore based on the outer request TTL + ident.ValidBefore = uint64(ssh.CertTimeInfinity) + if req.TTL != 0 { + b := k.clock.Now().UTC().Add(req.TTL) + ident.ValidBefore = uint64(b.Unix()) slog.DebugContext( context.TODO(), "Generated user key with expiry.", - "allowed_logins", c.AllowedLogins, - "valid_before_unix_ts", validBefore, + "allowed_logins", ident.AllowedLogins, + "valid_before_unix_ts", ident.ValidBefore, "valid_before", b, ) } - cert := &ssh.Certificate{ - // we have to use key id to identify teleport user - KeyId: c.Username, - ValidPrincipals: c.AllowedLogins, - Key: pubKey, - ValidAfter: uint64(k.clock.Now().UTC().Add(-1 * time.Minute).Unix()), - ValidBefore: validBefore, - CertType: ssh.UserCert, - } - cert.Permissions.Extensions = map[string]string{ - teleport.CertExtensionPermitPTY: "", - } - if c.PermitX11Forwarding { - cert.Permissions.Extensions[teleport.CertExtensionPermitX11Forwarding] = "" - } - if c.PermitAgentForwarding { - cert.Permissions.Extensions[teleport.CertExtensionPermitAgentForwarding] = "" - } - if c.PermitPortForwarding { - cert.Permissions.Extensions[teleport.CertExtensionPermitPortForwarding] = "" - } - if c.MFAVerified != "" { - cert.Permissions.Extensions[teleport.CertExtensionMFAVerified] = c.MFAVerified - } - if !c.PreviousIdentityExpires.IsZero() { - cert.Permissions.Extensions[teleport.CertExtensionPreviousIdentityExpires] = c.PreviousIdentityExpires.Format(time.RFC3339) - } - if c.LoginIP != "" { - cert.Permissions.Extensions[teleport.CertExtensionLoginIP] = c.LoginIP - } - if c.Impersonator != "" { - cert.Permissions.Extensions[teleport.CertExtensionImpersonator] = c.Impersonator - } - if c.DisallowReissue { - 
cert.Permissions.Extensions[teleport.CertExtensionDisallowReissue] = "" - } - if c.Renewable { - cert.Permissions.Extensions[teleport.CertExtensionRenewable] = "" - } - if c.Generation > 0 { - cert.Permissions.Extensions[teleport.CertExtensionGeneration] = fmt.Sprint(c.Generation) - } - if c.BotName != "" { - cert.Permissions.Extensions[teleport.CertExtensionBotName] = c.BotName - } - if c.BotInstanceID != "" { - cert.Permissions.Extensions[teleport.CertExtensionBotInstanceID] = c.BotInstanceID - } - if c.AllowedResourceIDs != "" { - cert.Permissions.Extensions[teleport.CertExtensionAllowedResources] = c.AllowedResourceIDs - } - if c.ConnectionDiagnosticID != "" { - cert.Permissions.Extensions[teleport.CertExtensionConnectionDiagnosticID] = c.ConnectionDiagnosticID - } - if c.PrivateKeyPolicy != "" { - cert.Permissions.Extensions[teleport.CertExtensionPrivateKeyPolicy] = string(c.PrivateKeyPolicy) - } - if devID := c.DeviceID; devID != "" { - cert.Permissions.Extensions[teleport.CertExtensionDeviceID] = devID - } - if assetTag := c.DeviceAssetTag; assetTag != "" { - cert.Permissions.Extensions[teleport.CertExtensionDeviceAssetTag] = assetTag - } - if credID := c.DeviceCredentialID; credID != "" { - cert.Permissions.Extensions[teleport.CertExtensionDeviceCredentialID] = credID - } - if c.GitHubUserID != "" { - cert.Permissions.Extensions[teleport.CertExtensionGitHubUserID] = c.GitHubUserID - } - if c.GitHubUsername != "" { - cert.Permissions.Extensions[teleport.CertExtensionGitHubUsername] = c.GitHubUsername - } - if c.PinnedIP != "" { + // set ValidAfter to be 1 minute in the past + ident.ValidAfter = uint64(k.clock.Now().UTC().Add(-1 * time.Minute).Unix()) + + // if the provided identity is attempting to perform IP pinning, make sure modules are enforced + if ident.PinnedIP != "" { if modules.GetModules().BuildType() != modules.BuildEnterprise { return nil, trace.AccessDenied("source IP pinning is only supported in Teleport Enterprise") } - if cert.CriticalOptions 
== nil { - cert.CriticalOptions = make(map[string]string) - } - // IPv4, all bits matter - ip := c.PinnedIP + "/32" - if strings.Contains(c.PinnedIP, ":") { - // IPv6 - ip = c.PinnedIP + "/128" - } - cert.CriticalOptions[teleport.CertCriticalOptionSourceAddress] = ip } - for _, extension := range c.CertificateExtensions { - // TODO(lxea): update behavior when non ssh, non extensions are supported. - if extension.Mode != types.CertExtensionMode_EXTENSION || - extension.Type != types.CertExtensionType_SSH { - continue - } - cert.Extensions[extension.Name] = extension.Value + // encode the identity into a certificate + cert, err := ident.Encode(req.CertificateFormat) + if err != nil { + return nil, trace.Wrap(err) } - // Add roles, traits, and route to cluster in the certificate extensions if - // the standard format was requested. Certificate extensions are not included - // legacy SSH certificates due to a bug in OpenSSH <= OpenSSH 7.1: - // https://bugzilla.mindrot.org/show_bug.cgi?id=2387 - if c.CertificateFormat == constants.CertificateFormatStandard { - traits, err := wrappers.MarshalTraits(&c.Traits) - if err != nil { - return nil, trace.Wrap(err) - } - if len(traits) > 0 { - cert.Permissions.Extensions[teleport.CertExtensionTeleportTraits] = string(traits) - } - if len(c.Roles) != 0 { - roles, err := services.MarshalCertRoles(c.Roles) - if err != nil { - return nil, trace.Wrap(err) - } - cert.Permissions.Extensions[teleport.CertExtensionTeleportRoles] = roles - } - if c.RouteToCluster != "" { - cert.Permissions.Extensions[teleport.CertExtensionTeleportRouteToCluster] = c.RouteToCluster - } - if !c.ActiveRequests.IsEmpty() { - requests, err := c.ActiveRequests.Marshal() - if err != nil { - return nil, trace.Wrap(err) - } - cert.Permissions.Extensions[teleport.CertExtensionTeleportActiveRequests] = string(requests) - } - } + // set the public key of the certificate + cert.Key = pubKey - if err := cert.SignCert(rand.Reader, c.CASigner); err != nil { + if err := 
cert.SignCert(rand.Reader, req.CASigner); err != nil { return nil, trace.Wrap(err) } + return ssh.MarshalAuthorizedKey(cert), nil } diff --git a/lib/auth/keygen/keygen_test.go b/lib/auth/keygen/keygen_test.go index e2d68d91a923e..d6c243b3ee986 100644 --- a/lib/auth/keygen/keygen_test.go +++ b/lib/auth/keygen/keygen_test.go @@ -38,6 +38,7 @@ import ( "github.com/gravitational/teleport/lib/auth/test" "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" ) type nativeContext struct { @@ -226,23 +227,24 @@ func TestUserCertCompatibility(t *testing.T) { for i, tc := range tests { comment := fmt.Sprintf("Test %v", i) - userCertificateBytes, err := tt.suite.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: ssh.MarshalAuthorizedKey(caSigner.PublicKey()), - Username: "user", - AllowedLogins: []string{"centos", "root"}, - TTL: time.Hour, - Roles: []string{"foo"}, - CertificateExtensions: []*types.CertExtension{{ - Type: types.CertExtensionType_SSH, - Mode: types.CertExtensionMode_EXTENSION, - Name: "login@github.com", - Value: "hello", + userCertificateBytes, err := tt.suite.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: ssh.MarshalAuthorizedKey(caSigner.PublicKey()), + TTL: time.Hour, + CertificateFormat: tc.inCompatibility, + Identity: sshca.Identity{ + Username: "user", + AllowedLogins: []string{"centos", "root"}, + Roles: []string{"foo"}, + CertificateExtensions: []*types.CertExtension{{ + Type: types.CertExtensionType_SSH, + Mode: types.CertExtensionMode_EXTENSION, + Name: "login@github.com", + Value: "hello", + }}, + PermitAgentForwarding: true, + PermitPortForwarding: true, }, - }, - CertificateFormat: tc.inCompatibility, - PermitAgentForwarding: true, - PermitPortForwarding: true, }) require.NoError(t, err, comment) diff --git a/lib/auth/test/suite.go b/lib/auth/test/suite.go index 
3e97874d8802e..14d22f8265647 100644 --- a/lib/auth/test/suite.go +++ b/lib/auth/test/suite.go @@ -95,15 +95,17 @@ func (s *AuthSuite) GenerateUserCert(t *testing.T) { caSigner, err := ssh.ParsePrivateKey(priv) require.NoError(t, err) - cert, err := s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: pub, - Username: "user", - AllowedLogins: []string{"centos", "root"}, - TTL: time.Hour, - PermitAgentForwarding: true, - PermitPortForwarding: true, - CertificateFormat: constants.CertificateFormatStandard, + cert, err := s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: pub, + TTL: time.Hour, + CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: "user", + AllowedLogins: []string{"centos", "root"}, + PermitAgentForwarding: true, + PermitPortForwarding: true, + }, }) require.NoError(t, err) @@ -112,59 +114,67 @@ func (s *AuthSuite) GenerateUserCert(t *testing.T) { err = checkCertExpiry(cert, s.Clock.Now().Add(-1*time.Minute), s.Clock.Now().Add(1*time.Hour)) require.NoError(t, err) - cert, err = s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: pub, - Username: "user", - AllowedLogins: []string{"root"}, - TTL: -20, - PermitAgentForwarding: true, - PermitPortForwarding: true, - CertificateFormat: constants.CertificateFormatStandard, + cert, err = s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: pub, + TTL: -20, + CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: "user", + AllowedLogins: []string{"root"}, + PermitAgentForwarding: true, + PermitPortForwarding: true, + }, }) require.NoError(t, err) err = checkCertExpiry(cert, s.Clock.Now().Add(-1*time.Minute), s.Clock.Now().Add(apidefaults.MinCertDuration)) require.NoError(t, err) - _, err = s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: pub, - Username: 
"user", - AllowedLogins: []string{"root"}, - TTL: 0, - PermitAgentForwarding: true, - PermitPortForwarding: true, - CertificateFormat: constants.CertificateFormatStandard, + _, err = s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: pub, + TTL: 0, + CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: "user", + AllowedLogins: []string{"root"}, + PermitAgentForwarding: true, + PermitPortForwarding: true, + }, }) require.NoError(t, err) err = checkCertExpiry(cert, s.Clock.Now().Add(-1*time.Minute), s.Clock.Now().Add(apidefaults.MinCertDuration)) require.NoError(t, err) - _, err = s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: pub, - Username: "user", - AllowedLogins: []string{"root"}, - TTL: time.Hour, - PermitAgentForwarding: true, - PermitPortForwarding: true, - CertificateFormat: constants.CertificateFormatStandard, + _, err = s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: pub, + TTL: time.Hour, + CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: "user", + AllowedLogins: []string{"root"}, + PermitAgentForwarding: true, + PermitPortForwarding: true, + }, }) require.NoError(t, err) inRoles := []string{"role-1", "role-2"} impersonator := "alice" - cert, err = s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: pub, - Username: "user", - Impersonator: impersonator, - AllowedLogins: []string{"root"}, - TTL: time.Hour, - PermitAgentForwarding: true, - PermitPortForwarding: true, - CertificateFormat: constants.CertificateFormatStandard, - Roles: inRoles, + cert, err = s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: pub, + TTL: time.Hour, + CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: "user", + Impersonator: impersonator, + AllowedLogins: 
[]string{"root"}, + PermitAgentForwarding: true, + PermitPortForwarding: true, + Roles: inRoles, + }, }) require.NoError(t, err) parsedCert, err := sshutils.ParseCertificate(cert) @@ -178,15 +188,17 @@ func (s *AuthSuite) GenerateUserCert(t *testing.T) { // Check that MFAVerified and PreviousIdentityExpires are encoded into ssh cert clock := clockwork.NewFakeClock() - cert, err = s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: pub, - Username: "user", - AllowedLogins: []string{"root"}, - TTL: time.Minute, - CertificateFormat: constants.CertificateFormatStandard, - MFAVerified: "mfa-device-id", - PreviousIdentityExpires: clock.Now().Add(time.Hour), + cert, err = s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: pub, + TTL: time.Minute, + CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: "user", + AllowedLogins: []string{"root"}, + MFAVerified: "mfa-device-id", + PreviousIdentityExpires: clock.Now().Add(time.Hour), + }, }) require.NoError(t, err) parsedCert, err = sshutils.ParseCertificate(cert) @@ -202,14 +214,16 @@ func (s *AuthSuite) GenerateUserCert(t *testing.T) { const devID = "deviceid1" const devTag = "devicetag1" const devCred = "devicecred1" - certRaw, err := s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, // Required. - PublicUserKey: pub, // Required. - Username: "llama", // Required. - AllowedLogins: []string{"llama"}, // Required. - DeviceID: devID, - DeviceAssetTag: devTag, - DeviceCredentialID: devCred, + certRaw, err := s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, // Required. + PublicUserKey: pub, // Required. + Identity: sshca.Identity{ + Username: "llama", // Required. + AllowedLogins: []string{"llama"}, // Required. 
+ DeviceID: devID, + DeviceAssetTag: devTag, + DeviceCredentialID: devCred, + }, }) require.NoError(t, err, "GenerateUserCert failed") @@ -223,13 +237,15 @@ func (s *AuthSuite) GenerateUserCert(t *testing.T) { t.Run("github identity", func(t *testing.T) { githubUserID := "1234567" githubUsername := "github-user" - certRaw, err := s.A.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, // Required. - PublicUserKey: pub, // Required. - Username: "llama", // Required. - AllowedLogins: []string{"llama"}, // Required. - GitHubUserID: githubUserID, - GitHubUsername: githubUsername, + certRaw, err := s.A.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, // Required. + PublicUserKey: pub, // Required. + Identity: sshca.Identity{ + Username: "llama", // Required. + AllowedLogins: []string{"llama"}, // Required. + GitHubUserID: githubUserID, + GitHubUsername: githubUsername, + }, }) require.NoError(t, err, "GenerateUserCert failed") diff --git a/lib/auth/testauthority/testauthority.go b/lib/auth/testauthority/testauthority.go index 8dae039d9c1f4..b58f9ac27493d 100644 --- a/lib/auth/testauthority/testauthority.go +++ b/lib/auth/testauthority/testauthority.go @@ -29,6 +29,7 @@ import ( "github.com/gravitational/teleport/lib/auth/keygen" "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" ) type Keygen struct { @@ -60,7 +61,7 @@ func (n *Keygen) GenerateHostCert(c services.HostCertParams) ([]byte, error) { return n.GenerateHostCertWithoutValidation(c) } -func (n *Keygen) GenerateUserCert(c services.UserCertParams) ([]byte, error) { +func (n *Keygen) GenerateUserCert(c sshca.UserCertificateRequest) ([]byte, error) { return n.GenerateUserCertWithoutValidation(c) } diff --git a/lib/client/client_store_test.go b/lib/client/client_store_test.go index 8090c5e664851..71239884aaaba 100644 --- a/lib/client/client_store_test.go +++ 
b/lib/client/client_store_test.go @@ -45,6 +45,7 @@ import ( "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" @@ -104,16 +105,18 @@ func (s *testAuthority) makeSignedKeyRing(t *testing.T, idx KeyRingIndex, makeEx caSigner, err := ssh.ParsePrivateKey(CAPriv) require.NoError(t, err) - cert, err := s.keygen.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: sshPriv.MarshalSSHPublicKey(), - Username: idx.Username, - AllowedLogins: allowedLogins, - TTL: ttl, - PermitAgentForwarding: false, - PermitPortForwarding: true, - GitHubUserID: "1234567", - GitHubUsername: "github-username", + cert, err := s.keygen.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: sshPriv.MarshalSSHPublicKey(), + TTL: ttl, + Identity: sshca.Identity{ + Username: idx.Username, + AllowedLogins: allowedLogins, + PermitAgentForwarding: false, + PermitPortForwarding: true, + GitHubUserID: "1234567", + GitHubUsername: "github-username", + }, }) require.NoError(t, err) diff --git a/lib/client/cluster_client_test.go b/lib/client/cluster_client_test.go index 7a90be3f30d80..e529b4737d1db 100644 --- a/lib/client/cluster_client_test.go +++ b/lib/client/cluster_client_test.go @@ -39,7 +39,7 @@ import ( libmfa "github.com/gravitational/teleport/lib/client/mfa" "github.com/gravitational/teleport/lib/fixtures" "github.com/gravitational/teleport/lib/observability/tracing" - "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/tlsca" ) @@ -390,13 +390,15 @@ func TestIssueUserCertsWithMFA(t *testing.T) { var sshCert, tlsCert []byte var err error if req.SSHPublicKey != nil { - 
sshCert, err = ca.keygen.GenerateUserCert(services.UserCertParams{ + sshCert, err = ca.keygen.GenerateUserCert(sshca.UserCertificateRequest{ CASigner: caSigner, PublicUserKey: req.SSHPublicKey, TTL: req.Expires.Sub(clock.Now()), - Username: req.Username, CertificateFormat: req.Format, - RouteToCluster: req.RouteToCluster, + Identity: sshca.Identity{ + Username: req.Username, + RouteToCluster: req.RouteToCluster, + }, }) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/client/identityfile/identity_test.go b/lib/client/identityfile/identity_test.go index 3f52aefe162db..9d8eeb62a894d 100644 --- a/lib/client/identityfile/identity_test.go +++ b/lib/client/identityfile/identity_test.go @@ -46,7 +46,7 @@ import ( "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/fixtures" "github.com/gravitational/teleport/lib/kube/kubeconfig" - "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/tlsca" ) @@ -108,11 +108,13 @@ func newClientKeyRing(t *testing.T, modifiers ...func(*tlsca.Identity)) *client. 
caSigner, err := ssh.NewSignerFromKey(signer) require.NoError(t, err) - certificate, err := keygen.GenerateUserCert(services.UserCertParams{ + certificate, err := keygen.GenerateUserCert(sshca.UserCertificateRequest{ CASigner: caSigner, PublicUserKey: ssh.MarshalAuthorizedKey(privateKey.SSHPublicKey()), - Username: "testuser", - AllowedLogins: []string{"testuser"}, + Identity: sshca.Identity{ + Username: "testuser", + AllowedLogins: []string{"testuser"}, + }, }) require.NoError(t, err) diff --git a/lib/client/keyagent_test.go b/lib/client/keyagent_test.go index 4c0c078e82293..a8dfdae28da95 100644 --- a/lib/client/keyagent_test.go +++ b/lib/client/keyagent_test.go @@ -50,6 +50,7 @@ import ( "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/fixtures" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" ) @@ -751,16 +752,18 @@ func (s *KeyAgentTestSuite) makeKeyRing(t *testing.T, username, proxyHost string sshPub, err := ssh.NewPublicKey(sshKey.Public()) require.NoError(t, err) - certificate, err := testauthority.New().GenerateUserCert(services.UserCertParams{ - CertificateFormat: constants.CertificateFormatStandard, - CASigner: caSigner, - PublicUserKey: ssh.MarshalAuthorizedKey(sshPub), - Username: username, - AllowedLogins: []string{username}, - TTL: ttl, - PermitAgentForwarding: true, - PermitPortForwarding: true, - RouteToCluster: s.clusterName, + certificate, err := testauthority.New().GenerateUserCert(sshca.UserCertificateRequest{ + CertificateFormat: constants.CertificateFormatStandard, + CASigner: caSigner, + PublicUserKey: ssh.MarshalAuthorizedKey(sshPub), + TTL: ttl, + Identity: sshca.Identity{ + Username: username, + AllowedLogins: []string{username}, + PermitAgentForwarding: true, + PermitPortForwarding: true, + RouteToCluster: s.clusterName, + }, }) require.NoError(t, err) diff 
--git a/lib/reversetunnel/srv_test.go b/lib/reversetunnel/srv_test.go index 2477739df359a..8794a8323f0f1 100644 --- a/lib/reversetunnel/srv_test.go +++ b/lib/reversetunnel/srv_test.go @@ -39,6 +39,7 @@ import ( "github.com/gravitational/teleport/lib/auth/authclient" "github.com/gravitational/teleport/lib/auth/testauthority" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/utils" ) @@ -103,15 +104,17 @@ func TestServerKeyAuth(t *testing.T) { { desc: "user cert", key: func() ssh.PublicKey { - rawCert, err := ta.GenerateUserCert(services.UserCertParams{ + rawCert, err := ta.GenerateUserCert(sshca.UserCertificateRequest{ CASigner: caSigner, PublicUserKey: pub, - Username: con.User(), - AllowedLogins: []string{con.User()}, - Roles: []string{"dev", "admin"}, - RouteToCluster: "user-cluster-name", CertificateFormat: constants.CertificateFormatStandard, TTL: time.Minute, + Identity: sshca.Identity{ + Username: con.User(), + AllowedLogins: []string{con.User()}, + Roles: []string{"dev", "admin"}, + RouteToCluster: "user-cluster-name", + }, }) require.NoError(t, err) key, _, _, _, err := ssh.ParseAuthorizedKey(rawCert) diff --git a/lib/services/authority.go b/lib/services/authority.go index fb6a3efe612e6..2345342b1195b 100644 --- a/lib/services/authority.go +++ b/lib/services/authority.go @@ -32,9 +32,7 @@ import ( "github.com/jonboulle/clockwork" "golang.org/x/crypto/ssh" - apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/api/types/wrappers" apiutils "github.com/gravitational/teleport/api/utils" "github.com/gravitational/teleport/api/utils/keys" "github.com/gravitational/teleport/lib/jwt" @@ -321,103 +319,6 @@ func (c HostCertParams) Check() error { return nil } -// UserCertParams defines OpenSSH user certificate parameters -type UserCertParams struct { - // CASigner is the signer that will sign 
the public key of the user with the CA private key - CASigner ssh.Signer - // PublicUserKey is the public key of the user in SSH authorized_keys format. - PublicUserKey []byte - // TTL defines how long a certificate is valid for - TTL time.Duration - // Username is teleport username - Username string - // Impersonator is set when a user requests certificate for another user - Impersonator string - // AllowedLogins is a list of SSH principals - AllowedLogins []string - // PermitX11Forwarding permits X11 forwarding for this cert - PermitX11Forwarding bool - // PermitAgentForwarding permits agent forwarding for this cert - PermitAgentForwarding bool - // PermitPortForwarding permits port forwarding. - PermitPortForwarding bool - // PermitFileCopying permits the use of SCP/SFTP. - PermitFileCopying bool - // Roles is a list of roles assigned to this user - Roles []string - // CertificateFormat is the format of the SSH certificate. - CertificateFormat string - // RouteToCluster specifies the target cluster - // if present in the certificate, will be used - // to route the requests to - RouteToCluster string - // Traits hold claim data used to populate a role at runtime. - Traits wrappers.Traits - // ActiveRequests tracks privilege escalation requests applied during - // certificate construction. - ActiveRequests RequestIDs - // MFAVerified is the UUID of an MFA device when this Identity was - // confirmed immediately after an MFA check. - MFAVerified string - // PreviousIdentityExpires is the expiry time of the identity/cert that this - // identity/cert was derived from. It is used to determine a session's hard - // deadline in cases where both require_session_mfa and disconnect_expired_cert - // are enabled. See https://github.com/gravitational/teleport/issues/18544. - PreviousIdentityExpires time.Time - // LoginIP is an observed IP of the client on the moment of certificate creation. 
- LoginIP string - // PinnedIP is an IP from which client must communicate with Teleport. - PinnedIP string - // DisallowReissue flags that any attempt to request new certificates while - // authenticated with this cert should be denied. - DisallowReissue bool - // CertificateExtensions are user configured ssh key extensions - CertificateExtensions []*types.CertExtension - // Renewable indicates this certificate is renewable. - Renewable bool - // Generation counts the number of times a certificate has been renewed. - Generation uint64 - // BotName is set to the name of the bot, if the user is a Machine ID bot user. - // Empty for human users. - BotName string - // BotInstanceID is the unique identifier for the bot instance, if this is a - // Machine ID bot. It is empty for human users. - BotInstanceID string - // AllowedResourceIDs lists the resources the user should be able to access. - AllowedResourceIDs string - // ConnectionDiagnosticID references the ConnectionDiagnostic that we should use to append traces when testing a Connection. - ConnectionDiagnosticID string - // PrivateKeyPolicy is the private key policy supported by this certificate. - PrivateKeyPolicy keys.PrivateKeyPolicy - // DeviceID is the trusted device identifier. - DeviceID string - // DeviceAssetTag is the device inventory identifier. - DeviceAssetTag string - // DeviceCredentialID is the identifier for the credential used by the device - // to authenticate itself. - DeviceCredentialID string - // GitHubUserID indicates the GitHub user ID identified by the GitHub - // connector. - GitHubUserID string - // GitHubUserID indicates the GitHub username identified by the GitHub - // connector. 
- GitHubUsername string -} - -// CheckAndSetDefaults checks the user certificate parameters -func (c *UserCertParams) CheckAndSetDefaults() error { - if c.CASigner == nil { - return trace.BadParameter("CASigner is required") - } - if c.TTL < apidefaults.MinCertDuration { - c.TTL = apidefaults.MinCertDuration - } - if len(c.AllowedLogins) == 0 { - return trace.BadParameter("AllowedLogins are required") - } - return nil -} - // CertPoolFromCertAuthorities returns a certificate pool from the TLS certificates // set up in the certificate authorities list, as well as the number of certificates // that were added to the pool. diff --git a/lib/srv/authhandlers_test.go b/lib/srv/authhandlers_test.go index 78856817654a9..907a3db97b786 100644 --- a/lib/srv/authhandlers_test.go +++ b/lib/srv/authhandlers_test.go @@ -35,7 +35,7 @@ import ( "github.com/gravitational/teleport/lib/auth/testauthority" "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/events/eventstest" - "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/sshca" ) type mockCAandAuthPrefGetter struct { @@ -213,11 +213,13 @@ func TestRBAC(t *testing.T) { privateKey, err := cryptosuites.GeneratePrivateKeyWithAlgorithm(cryptosuites.ECDSAP256) require.NoError(t, err) - c, err := keygen.GenerateUserCert(services.UserCertParams{ + c, err := keygen.GenerateUserCert(sshca.UserCertificateRequest{ CASigner: caSigner, PublicUserKey: ssh.MarshalAuthorizedKey(privateKey.SSHPublicKey()), - Username: "testuser", - AllowedLogins: []string{"testuser"}, + Identity: sshca.Identity{ + Username: "testuser", + AllowedLogins: []string{"testuser"}, + }, }) require.NoError(t, err) @@ -385,16 +387,18 @@ func TestRBACJoinMFA(t *testing.T) { require.NoError(t, err) keygen := testauthority.New() - c, err := keygen.GenerateUserCert(services.UserCertParams{ - CASigner: caSigner, - PublicUserKey: privateKey.MarshalSSHPublicKey(), - Username: username, - AllowedLogins: 
[]string{username}, - Traits: wrappers.Traits{ - teleport.TraitInternalPrefix: []string{""}, - }, - Roles: []string{tt.role}, + c, err := keygen.GenerateUserCert(sshca.UserCertificateRequest{ + CASigner: caSigner, + PublicUserKey: privateKey.MarshalSSHPublicKey(), CertificateFormat: constants.CertificateFormatStandard, + Identity: sshca.Identity{ + Username: username, + AllowedLogins: []string{username}, + Traits: wrappers.Traits{ + teleport.TraitInternalPrefix: []string{""}, + }, + Roles: []string{tt.role}, + }, }) require.NoError(t, err) diff --git a/lib/sshca/identity.go b/lib/sshca/identity.go new file mode 100644 index 0000000000000..19f40bfdf336d --- /dev/null +++ b/lib/sshca/identity.go @@ -0,0 +1,392 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +// Package sshca specifies interfaces for SSH certificate authorities +package sshca + +import ( + "fmt" + "maps" + "strconv" + "strings" + "time" + + "github.com/gravitational/trace" + "golang.org/x/crypto/ssh" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/api/constants" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/wrappers" + "github.com/gravitational/teleport/api/utils/keys" + "github.com/gravitational/teleport/lib/services" +) + +// Identity is a user identity. 
All identity fields map directly to an ssh certificate field. +type Identity struct { + // ValidAfter is the unix timestamp that marks the start time for when the certificate should + // be considered valid. + ValidAfter uint64 + // ValidBefore is the unix timestamp that marks the end time for when the certificate should + // be considered valid. + ValidBefore uint64 + // Username is teleport username + Username string + // Impersonator is set when a user requests certificate for another user + Impersonator string + // AllowedLogins is a list of SSH principals + AllowedLogins []string + // PermitX11Forwarding permits X11 forwarding for this cert + PermitX11Forwarding bool + // PermitAgentForwarding permits agent forwarding for this cert + PermitAgentForwarding bool + // PermitPortForwarding permits port forwarding. + PermitPortForwarding bool + // Roles is a list of roles assigned to this user + Roles []string + // RouteToCluster specifies the target cluster + // if present in the certificate, will be used + // to route the requests to + RouteToCluster string + // Traits hold claim data used to populate a role at runtime. + Traits wrappers.Traits + // ActiveRequests tracks privilege escalation requests applied during + // certificate construction. + ActiveRequests services.RequestIDs + // MFAVerified is the UUID of an MFA device when this Identity was + // confirmed immediately after an MFA check. + MFAVerified string + // PreviousIdentityExpires is the expiry time of the identity/cert that this + // identity/cert was derived from. It is used to determine a session's hard + // deadline in cases where both require_session_mfa and disconnect_expired_cert + // are enabled. See https://github.com/gravitational/teleport/issues/18544. + PreviousIdentityExpires time.Time + // LoginIP is an observed IP of the client on the moment of certificate creation. + LoginIP string + // PinnedIP is an IP from which client must communicate with Teleport. 
+ PinnedIP string + // DisallowReissue flags that any attempt to request new certificates while + // authenticated with this cert should be denied. + DisallowReissue bool + // CertificateExtensions are user configured ssh key extensions (note: this field also + // ends up aggregating all *unknown* extensions during cert parsing, meaning that this + // can sometimes contain fields that were inserted by a newer version of teleport). + CertificateExtensions []*types.CertExtension + // Renewable indicates this certificate is renewable. + Renewable bool + // Generation counts the number of times a certificate has been renewed, with a generation of 1 + // meaning the cert has never been renewed. A generation of zero means the cert's generation is + // not being tracked. + Generation uint64 + // BotName is set to the name of the bot, if the user is a Machine ID bot user. + // Empty for human users. + BotName string + // BotInstanceID is the unique identifier for the bot instance, if this is a + // Machine ID bot. It is empty for human users. + BotInstanceID string + // AllowedResourceIDs lists the resources the user should be able to access. + AllowedResourceIDs string + // ConnectionDiagnosticID references the ConnectionDiagnostic that we should use to append traces when testing a Connection. + ConnectionDiagnosticID string + // PrivateKeyPolicy is the private key policy supported by this certificate. + PrivateKeyPolicy keys.PrivateKeyPolicy + // DeviceID is the trusted device identifier. + DeviceID string + // DeviceAssetTag is the device inventory identifier. + DeviceAssetTag string + // DeviceCredentialID is the identifier for the credential used by the device + // to authenticate itself. + DeviceCredentialID string + // GitHubUserID indicates the GitHub user ID identified by the GitHub + // connector. + GitHubUserID string + // GitHubUsername indicates the GitHub username identified by the GitHub + // connector. 
+ GitHubUsername string +} + +// Check performs validation of certain fields in the identity. +func (i *Identity) Check() error { + if len(i.AllowedLogins) == 0 { + return trace.BadParameter("ssh user identity missing allowed logins") + } + + return nil +} + +// Encode encodes the identity into an ssh certificate. Note that the returned certificate is incomplete +// and must be have its public key set before signing. +func (i *Identity) Encode(certFormat string) (*ssh.Certificate, error) { + validBefore := i.ValidBefore + if validBefore == 0 { + validBefore = uint64(ssh.CertTimeInfinity) + } + validAfter := i.ValidAfter + if validAfter == 0 { + validAfter = uint64(time.Now().UTC().Add(-1 * time.Minute).Unix()) + } + cert := &ssh.Certificate{ + // we have to use key id to identify teleport user + KeyId: i.Username, + ValidPrincipals: i.AllowedLogins, + ValidAfter: validAfter, + ValidBefore: validBefore, + CertType: ssh.UserCert, + } + cert.Permissions.Extensions = map[string]string{ + teleport.CertExtensionPermitPTY: "", + } + + if i.PermitX11Forwarding { + cert.Permissions.Extensions[teleport.CertExtensionPermitX11Forwarding] = "" + } + if i.PermitAgentForwarding { + cert.Permissions.Extensions[teleport.CertExtensionPermitAgentForwarding] = "" + } + if i.PermitPortForwarding { + cert.Permissions.Extensions[teleport.CertExtensionPermitPortForwarding] = "" + } + if i.MFAVerified != "" { + cert.Permissions.Extensions[teleport.CertExtensionMFAVerified] = i.MFAVerified + } + if !i.PreviousIdentityExpires.IsZero() { + cert.Permissions.Extensions[teleport.CertExtensionPreviousIdentityExpires] = i.PreviousIdentityExpires.Format(time.RFC3339) + } + if i.LoginIP != "" { + cert.Permissions.Extensions[teleport.CertExtensionLoginIP] = i.LoginIP + } + if i.Impersonator != "" { + cert.Permissions.Extensions[teleport.CertExtensionImpersonator] = i.Impersonator + } + if i.DisallowReissue { + cert.Permissions.Extensions[teleport.CertExtensionDisallowReissue] = "" + } + if 
i.Renewable { + cert.Permissions.Extensions[teleport.CertExtensionRenewable] = "" + } + if i.Generation > 0 { + cert.Permissions.Extensions[teleport.CertExtensionGeneration] = fmt.Sprint(i.Generation) + } + if i.BotName != "" { + cert.Permissions.Extensions[teleport.CertExtensionBotName] = i.BotName + } + if i.BotInstanceID != "" { + cert.Permissions.Extensions[teleport.CertExtensionBotInstanceID] = i.BotInstanceID + } + if i.AllowedResourceIDs != "" { + cert.Permissions.Extensions[teleport.CertExtensionAllowedResources] = i.AllowedResourceIDs + } + if i.ConnectionDiagnosticID != "" { + cert.Permissions.Extensions[teleport.CertExtensionConnectionDiagnosticID] = i.ConnectionDiagnosticID + } + if i.PrivateKeyPolicy != "" { + cert.Permissions.Extensions[teleport.CertExtensionPrivateKeyPolicy] = string(i.PrivateKeyPolicy) + } + if devID := i.DeviceID; devID != "" { + cert.Permissions.Extensions[teleport.CertExtensionDeviceID] = devID + } + if assetTag := i.DeviceAssetTag; assetTag != "" { + cert.Permissions.Extensions[teleport.CertExtensionDeviceAssetTag] = assetTag + } + if credID := i.DeviceCredentialID; credID != "" { + cert.Permissions.Extensions[teleport.CertExtensionDeviceCredentialID] = credID + } + if i.GitHubUserID != "" { + cert.Permissions.Extensions[teleport.CertExtensionGitHubUserID] = i.GitHubUserID + } + if i.GitHubUsername != "" { + cert.Permissions.Extensions[teleport.CertExtensionGitHubUsername] = i.GitHubUsername + } + + if i.PinnedIP != "" { + if cert.CriticalOptions == nil { + cert.CriticalOptions = make(map[string]string) + } + // IPv4, all bits matter + ip := i.PinnedIP + "/32" + if strings.Contains(i.PinnedIP, ":") { + // IPv6 + ip = i.PinnedIP + "/128" + } + cert.CriticalOptions[teleport.CertCriticalOptionSourceAddress] = ip + } + + for _, extension := range i.CertificateExtensions { + // TODO(lxea): update behavior when non ssh, non extensions are supported. 
+ if extension.Mode != types.CertExtensionMode_EXTENSION || + extension.Type != types.CertExtensionType_SSH { + continue + } + cert.Extensions[extension.Name] = extension.Value + } + + // Add roles, traits, and route to cluster in the certificate extensions if + // the standard format was requested. Certificate extensions are not included + // legacy SSH certificates due to a bug in OpenSSH <= OpenSSH 7.1: + // https://bugzilla.mindrot.org/show_bug.cgi?id=2387 + if certFormat == constants.CertificateFormatStandard { + traits, err := wrappers.MarshalTraits(&i.Traits) + if err != nil { + return nil, trace.Wrap(err) + } + if len(traits) > 0 { + cert.Permissions.Extensions[teleport.CertExtensionTeleportTraits] = string(traits) + } + if len(i.Roles) != 0 { + roles, err := services.MarshalCertRoles(i.Roles) + if err != nil { + return nil, trace.Wrap(err) + } + cert.Permissions.Extensions[teleport.CertExtensionTeleportRoles] = roles + } + if i.RouteToCluster != "" { + cert.Permissions.Extensions[teleport.CertExtensionTeleportRouteToCluster] = i.RouteToCluster + } + if !i.ActiveRequests.IsEmpty() { + requests, err := i.ActiveRequests.Marshal() + if err != nil { + return nil, trace.Wrap(err) + } + cert.Permissions.Extensions[teleport.CertExtensionTeleportActiveRequests] = string(requests) + } + } + + return cert, nil +} + +// DecodeIdentity decodes an ssh certificate into an identity. +func DecodeIdentity(cert *ssh.Certificate) (*Identity, error) { + if cert.CertType != ssh.UserCert { + return nil, trace.BadParameter("DecodeIdentity intended for use with user certs, got %v", cert.CertType) + } + ident := &Identity{ + Username: cert.KeyId, + AllowedLogins: cert.ValidPrincipals, + ValidAfter: cert.ValidAfter, + ValidBefore: cert.ValidBefore, + } + + // clone the extension map and remove entries from the clone as they are processed so + // that we can easily aggregate the remainder into the CertificateExtensions field. 
+ extensions := maps.Clone(cert.Extensions) + + takeExtension := func(name string) (value string, ok bool) { + v, ok := extensions[name] + if !ok { + return "", false + } + delete(extensions, name) + return v, true + } + + takeValue := func(name string) string { + value, _ := takeExtension(name) + return value + } + + takeBool := func(name string) bool { + _, ok := takeExtension(name) + return ok + } + + // ignore the permit pty extension, it's always set + _, _ = takeExtension(teleport.CertExtensionPermitPTY) + + ident.PermitX11Forwarding = takeBool(teleport.CertExtensionPermitX11Forwarding) + ident.PermitAgentForwarding = takeBool(teleport.CertExtensionPermitAgentForwarding) + ident.PermitPortForwarding = takeBool(teleport.CertExtensionPermitPortForwarding) + ident.MFAVerified = takeValue(teleport.CertExtensionMFAVerified) + + if v, ok := takeExtension(teleport.CertExtensionPreviousIdentityExpires); ok { + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return nil, trace.BadParameter("failed to parse value %q for extension %q as RFC3339 timestamp: %v", v, teleport.CertExtensionPreviousIdentityExpires, err) + } + ident.PreviousIdentityExpires = t + } + + ident.LoginIP = takeValue(teleport.CertExtensionLoginIP) + ident.Impersonator = takeValue(teleport.CertExtensionImpersonator) + ident.DisallowReissue = takeBool(teleport.CertExtensionDisallowReissue) + ident.Renewable = takeBool(teleport.CertExtensionRenewable) + + if v, ok := takeExtension(teleport.CertExtensionGeneration); ok { + i, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return nil, trace.BadParameter("failed to parse value %q for extension %q as uint64: %v", v, teleport.CertExtensionGeneration, err) + } + ident.Generation = i + } + + ident.BotName = takeValue(teleport.CertExtensionBotName) + ident.BotInstanceID = takeValue(teleport.CertExtensionBotInstanceID) + ident.AllowedResourceIDs = takeValue(teleport.CertExtensionAllowedResources) + ident.ConnectionDiagnosticID = 
takeValue(teleport.CertExtensionConnectionDiagnosticID) + ident.PrivateKeyPolicy = keys.PrivateKeyPolicy(takeValue(teleport.CertExtensionPrivateKeyPolicy)) + ident.DeviceID = takeValue(teleport.CertExtensionDeviceID) + ident.DeviceAssetTag = takeValue(teleport.CertExtensionDeviceAssetTag) + ident.DeviceCredentialID = takeValue(teleport.CertExtensionDeviceCredentialID) + ident.GitHubUserID = takeValue(teleport.CertExtensionGitHubUserID) + ident.GitHubUsername = takeValue(teleport.CertExtensionGitHubUsername) + + if v, ok := cert.CriticalOptions[teleport.CertCriticalOptionSourceAddress]; ok { + parts := strings.Split(v, "/") + if len(parts) != 2 { + return nil, trace.BadParameter("failed to parse value %q for critical option %q as CIDR", v, teleport.CertCriticalOptionSourceAddress) + } + ident.PinnedIP = parts[0] + } + + if v, ok := takeExtension(teleport.CertExtensionTeleportTraits); ok { + var traits wrappers.Traits + if err := wrappers.UnmarshalTraits([]byte(v), &traits); err != nil { + return nil, trace.BadParameter("failed to unmarshal value %q for extension %q as traits: %v", v, teleport.CertExtensionTeleportTraits, err) + } + ident.Traits = traits + } + + if v, ok := takeExtension(teleport.CertExtensionTeleportRoles); ok { + roles, err := services.UnmarshalCertRoles(v) + if err != nil { + return nil, trace.BadParameter("failed to unmarshal value %q for extension %q as roles: %v", v, teleport.CertExtensionTeleportRoles, err) + } + ident.Roles = roles + } + + ident.RouteToCluster = takeValue(teleport.CertExtensionTeleportRouteToCluster) + + if v, ok := takeExtension(teleport.CertExtensionTeleportActiveRequests); ok { + var requests services.RequestIDs + if err := requests.Unmarshal([]byte(v)); err != nil { + return nil, trace.BadParameter("failed to unmarshal value %q for extension %q as active requests: %v", v, teleport.CertExtensionTeleportActiveRequests, err) + } + ident.ActiveRequests = requests + } + + // aggregate all remaining extensions into the 
CertificateExtensions field + for name, value := range extensions { + ident.CertificateExtensions = append(ident.CertificateExtensions, &types.CertExtension{ + Name: name, + Value: value, + Type: types.CertExtensionType_SSH, + Mode: types.CertExtensionMode_EXTENSION, + }) + } + + return ident, nil +} diff --git a/lib/sshca/identity_test.go b/lib/sshca/identity_test.go new file mode 100644 index 0000000000000..5c7c6db75b3e8 --- /dev/null +++ b/lib/sshca/identity_test.go @@ -0,0 +1,97 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +// Package sshca specifies interfaces for SSH certificate authorities +package sshca + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/gravitational/teleport/api/constants" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/wrappers" + "github.com/gravitational/teleport/api/utils/keys" + "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/utils/testutils" +) + +func TestIdentityConversion(t *testing.T) { + ident := &Identity{ + ValidAfter: 1, + ValidBefore: 2, + Username: "user", + Impersonator: "impersonator", + AllowedLogins: []string{"login1", "login2"}, + PermitX11Forwarding: true, + PermitAgentForwarding: true, + PermitPortForwarding: true, + Roles: []string{"role1", "role2"}, + RouteToCluster: "cluster", + Traits: wrappers.Traits{"trait1": []string{"value1"}, "trait2": []string{"value2"}}, + ActiveRequests: services.RequestIDs{ + AccessRequests: []string{uuid.NewString()}, + }, + MFAVerified: "mfa", + PreviousIdentityExpires: time.Unix(12345, 0), + LoginIP: "127.0.0.1", + PinnedIP: "127.0.0.1", + DisallowReissue: true, + CertificateExtensions: []*types.CertExtension{&types.CertExtension{ + Name: "extname", + Value: "extvalue", + Type: types.CertExtensionType_SSH, + Mode: types.CertExtensionMode_EXTENSION, + }}, + Renewable: true, + Generation: 3, + BotName: "bot", + BotInstanceID: "instance", + AllowedResourceIDs: "resource", + ConnectionDiagnosticID: "diag", + PrivateKeyPolicy: keys.PrivateKeyPolicy("policy"), + DeviceID: "device", + DeviceAssetTag: "asset", + DeviceCredentialID: "cred", + GitHubUserID: "github", + GitHubUsername: "ghuser", + } + + ignores := []string{ + "CertExtension.Type", // only currently defined enum variant is a zero value + "CertExtension.Mode", // only currently defined enum variant is a zero value + // TODO(fspmarshall): figure out a mechanism 
for making ignore of grpc fields more convenient + "CertExtension.XXX_NoUnkeyedLiteral", + "CertExtension.XXX_unrecognized", + "CertExtension.XXX_sizecache", + } + + require.True(t, testutils.ExhaustiveNonEmpty(ident, ignores...), "empty=%+v", testutils.FindAllEmpty(ident, ignores...)) + + cert, err := ident.Encode(constants.CertificateFormatStandard) + require.NoError(t, err) + + ident2, err := DecodeIdentity(cert) + require.NoError(t, err) + + require.Empty(t, cmp.Diff(ident, ident2)) +} diff --git a/lib/sshca/sshca.go b/lib/sshca/sshca.go index 5e9e3f548f853..15f5dcf6c1aeb 100644 --- a/lib/sshca/sshca.go +++ b/lib/sshca/sshca.go @@ -20,6 +20,12 @@ package sshca import ( + "time" + + "github.com/gravitational/trace" + "golang.org/x/crypto/ssh" + + apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/lib/services" ) @@ -33,5 +39,34 @@ type Authority interface { // GenerateUserCert generates user ssh certificate, it takes pkey as a signing // private key (user certificate authority) - GenerateUserCert(certParams services.UserCertParams) ([]byte, error) + GenerateUserCert(UserCertificateRequest) ([]byte, error) +} + +// UserCertificateRequest is a request to generate a new ssh user certificate. +type UserCertificateRequest struct { + // CASigner is the signer that will sign the public key of the user with the CA private key + CASigner ssh.Signer + // PublicUserKey is the public key of the user in SSH authorized_keys format. + PublicUserKey []byte + // TTL defines how long a certificate is valid for (if specified, ValidAfter/ValidBefore within the + // identity must not be set). + TTL time.Duration + // CertificateFormat is the format of the SSH certificate. + CertificateFormat string + // Identity is the user identity to be encoded in the certificate. 
+ Identity Identity +} + +func (r *UserCertificateRequest) CheckAndSetDefaults() error { + if r.CASigner == nil { + return trace.BadParameter("ssh user certificate request missing ca signer") + } + if r.TTL < apidefaults.MinCertDuration { + r.TTL = apidefaults.MinCertDuration + } + if err := r.Identity.Check(); err != nil { + return trace.Wrap(err) + } + + return nil } From 4ee850ee088722ae9db80b5089f8ea74dbcdab86 Mon Sep 17 00:00:00 2001 From: Lisa Kim Date: Thu, 9 Jan 2025 15:14:45 -0800 Subject: [PATCH 37/45] Pass join token suggestedLabels to app server labels during install.sh (#50720) * Allow adding app server labels from join token for install.sh * Address CRs * Reduce label yaml space, improve test --- lib/web/join_tokens.go | 8 ++++++++ lib/web/join_tokens_test.go | 11 +++++++++++ lib/web/scripts/node-join/install.sh | 8 ++++++++ 3 files changed, 27 insertions(+) diff --git a/lib/web/join_tokens.go b/lib/web/join_tokens.go index df9896f5e1532..d54269df7c381 100644 --- a/lib/web/join_tokens.go +++ b/lib/web/join_tokens.go @@ -631,6 +631,7 @@ func getJoinScript(ctx context.Context, settings scriptSettings, m nodeAPIGetter } var buf bytes.Buffer + var appServerResourceLabels []string // If app install mode is requested but parameters are blank for some reason, // we need to return an error. 
if settings.appInstallMode { @@ -640,6 +641,12 @@ func getJoinScript(ctx context.Context, settings scriptSettings, m nodeAPIGetter if !appURIPattern.MatchString(settings.appURI) { return "", trace.BadParameter("appURI %q contains invalid characters", settings.appURI) } + + suggestedLabels := token.GetSuggestedLabels() + appServerResourceLabels, err = scripts.MarshalLabelsYAML(suggestedLabels, 4) + if err != nil { + return "", trace.Wrap(err) + } } if settings.discoveryInstallMode { @@ -689,6 +696,7 @@ func getJoinScript(ctx context.Context, settings scriptSettings, m nodeAPIGetter "installUpdater": strconv.FormatBool(settings.installUpdater), "version": shsprintf.EscapeDefaultContext(version), "appInstallMode": strconv.FormatBool(settings.appInstallMode), + "appServerResourceLabels": appServerResourceLabels, "appName": shsprintf.EscapeDefaultContext(settings.appName), "appURI": shsprintf.EscapeDefaultContext(settings.appURI), "joinMethod": shsprintf.EscapeDefaultContext(settings.joinMethod), diff --git a/lib/web/join_tokens_test.go b/lib/web/join_tokens_test.go index ba0b0be4ff9b1..4e0062b333ef3 100644 --- a/lib/web/join_tokens_test.go +++ b/lib/web/join_tokens_test.go @@ -761,6 +761,17 @@ func TestGetNodeJoinScript(t *testing.T) { require.Contains(t, script, fmt.Sprintf("%s=%s", types.InternalResourceIDLabel, internalResourceID)) }, }, + { + desc: "app server labels", + settings: scriptSettings{token: validToken, appInstallMode: true, appName: "app-name", appURI: "app-uri"}, + errAssert: require.NoError, + extraAssertions: func(script string) { + require.Contains(t, script, `APP_NAME='app-name'`) + require.Contains(t, script, `APP_URI='app-uri'`) + require.Contains(t, script, `public_addr`) + require.Contains(t, script, fmt.Sprintf(" labels:\n %s: %s", types.InternalResourceIDLabel, internalResourceID)) + }, + }, } { t.Run(test.desc, func(t *testing.T) { script, err := getJoinScript(context.Background(), test.settings, m) diff --git 
a/lib/web/scripts/node-join/install.sh b/lib/web/scripts/node-join/install.sh index 3d8403c00787d..64c7cc6b6aab2 100755 --- a/lib/web/scripts/node-join/install.sh +++ b/lib/web/scripts/node-join/install.sh @@ -441,6 +441,11 @@ get_yaml_list() { install_teleport_app_config() { log "Writing Teleport app service config to ${TELEPORT_CONFIG_PATH}" CA_PINS_CONFIG=$(get_yaml_list "ca_pin" "${CA_PIN_HASHES}" " ") + # This file is processed by `shellcheck` as part of the lint step + # It detects an issue because of un-set variables - $index and $line. This check is called SC2154. + # However, that's not an issue, because those variables are replaced when we run go's text/template engine over it. + # When executing the script, those are no longer variables but actual values. + # shellcheck disable=SC2154 cat << EOF > ${TELEPORT_CONFIG_PATH} version: v3 teleport: @@ -463,6 +468,9 @@ app_service: - name: "${APP_NAME}" uri: "${APP_URI}" public_addr: ${APP_PUBLIC_ADDR} + labels:{{range $index, $line := .appServerResourceLabels}} + {{$line -}} +{{end}} EOF } # installs the provided teleport config (for database service) From 66f82c2f31bfb2ee10075a182f95ac61deea7919 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Fri, 10 Jan 2025 11:38:23 +0000 Subject: [PATCH 38/45] Fix UserTask Status not being updated (#50855) * Fix UserTask Status not being updated The Status field for UserTasks was not being correctly updated when the Spec.State was not changed.
* copy the status field * use admin client instead of backend directly --- lib/auth/usertasks/usertasksv1/service.go | 25 +++++++++----- .../usertasks/usertasksv1/service_test.go | 33 ++++++++++++++++++- lib/web/usertasks_test.go | 5 ++- 3 files changed, 52 insertions(+), 11 deletions(-) diff --git a/lib/auth/usertasks/usertasksv1/service.go b/lib/auth/usertasks/usertasksv1/service.go index a383e55a70135..74223f258369c 100644 --- a/lib/auth/usertasks/usertasksv1/service.go +++ b/lib/auth/usertasks/usertasksv1/service.go @@ -19,6 +19,7 @@ package usertasksv1 import ( + "cmp" "context" "log/slog" "time" @@ -131,7 +132,7 @@ func (s *Service) CreateUserTask(ctx context.Context, req *usertasksv1.CreateUse return nil, trace.Wrap(err) } - s.updateStatus(req.UserTask) + s.updateStatus(req.UserTask, nil /* existing user task */) rsp, err := s.backend.CreateUserTask(ctx, req.UserTask) s.emitCreateAuditEvent(ctx, rsp, authCtx, err) @@ -264,10 +265,7 @@ func (s *Service) UpdateUserTask(ctx context.Context, req *usertasksv1.UpdateUse } stateChanged := existingUserTask.GetSpec().GetState() != req.GetUserTask().GetSpec().GetState() - - if stateChanged { - s.updateStatus(req.UserTask) - } + s.updateStatus(req.UserTask, existingUserTask) rsp, err := s.backend.UpdateUserTask(ctx, req.UserTask) s.emitUpdateAuditEvent(ctx, existingUserTask, req.GetUserTask(), authCtx, err) @@ -333,9 +331,7 @@ func (s *Service) UpsertUserTask(ctx context.Context, req *usertasksv1.UpsertUse stateChanged = existingUserTask.GetSpec().GetState() != req.GetUserTask().GetSpec().GetState() } - if stateChanged { - s.updateStatus(req.UserTask) - } + s.updateStatus(req.UserTask, existingUserTask) rsp, err := s.backend.UpsertUserTask(ctx, req.UserTask) s.emitUpsertAuditEvent(ctx, existingUserTask, req.GetUserTask(), authCtx, err) @@ -350,10 +346,21 @@ func (s *Service) UpsertUserTask(ctx context.Context, req *usertasksv1.UpsertUse return rsp, nil } -func (s *Service) updateStatus(ut *usertasksv1.UserTask) { +func 
(s *Service) updateStatus(ut *usertasksv1.UserTask, existing *usertasksv1.UserTask) { + // Default status for UserTask. ut.Status = &usertasksv1.UserTaskStatus{ LastStateChange: timestamppb.New(s.clock.Now()), } + + if existing != nil { + // Inherit everything from existing UserTask. + ut.Status.LastStateChange = cmp.Or(existing.GetStatus().GetLastStateChange(), ut.Status.LastStateChange) + + // Update specific values. + if existing.GetSpec().GetState() != ut.GetSpec().GetState() { + ut.Status.LastStateChange = timestamppb.New(s.clock.Now()) + } + } } func (s *Service) emitUpsertAuditEvent(ctx context.Context, old, new *usertasksv1.UserTask, authCtx *authz.Context, err error) { diff --git a/lib/auth/usertasks/usertasksv1/service_test.go b/lib/auth/usertasks/usertasksv1/service_test.go index d40b3740af591..1a909c278bdd8 100644 --- a/lib/auth/usertasks/usertasksv1/service_test.go +++ b/lib/auth/usertasks/usertasksv1/service_test.go @@ -153,6 +153,7 @@ func TestEvents(t *testing.T) { // LastStateChange is updated. require.Equal(t, timestamppb.New(fakeClock.Now()), createUserTaskResp.Status.LastStateChange) + expectedLastStateChange := createUserTaskResp.Status.LastStateChange ut1.Spec.DiscoverEc2.Instances["i-345"] = &usertasksv1.DiscoverEC2Instance{ InstanceId: "i-345", DiscoveryConfig: "dc01", @@ -165,7 +166,7 @@ func TestEvents(t *testing.T) { require.Len(t, testReporter.emittedEvents, 1) consumeAssertEvent(t, auditEventsSink.C(), auditEventFor(userTaskName, "update", "OPEN", "OPEN")) // LastStateChange is not updated. - require.Equal(t, createUserTaskResp.Status.LastStateChange, upsertUserTaskResp.Status.LastStateChange) + require.Equal(t, expectedLastStateChange.AsTime(), upsertUserTaskResp.Status.LastStateChange.AsTime()) ut1.Spec.State = "RESOLVED" fakeClock.Advance(1 * time.Minute) @@ -177,6 +178,36 @@ func TestEvents(t *testing.T) { // LastStateChange was updated because the state changed. 
require.Equal(t, timestamppb.New(fakeClock.Now()), updateUserTaskResp.Status.LastStateChange) + // Updating one of the instances. + expectedLastStateChange = updateUserTaskResp.Status.GetLastStateChange() + fakeClock.Advance(1 * time.Minute) + ut1.Spec.DiscoverEc2.Instances["i-345"] = &usertasksv1.DiscoverEC2Instance{ + InstanceId: "i-345", + DiscoveryConfig: "dc01", + DiscoveryGroup: "dg01", + SyncTime: timestamppb.New(fakeClock.Now()), + } + updateUserTaskResp, err = service.UpdateUserTask(ctx, &usertasksv1.UpdateUserTaskRequest{UserTask: ut1}) + require.NoError(t, err) + // Does not change the LastStateChange + require.Equal(t, expectedLastStateChange.AsTime(), updateUserTaskResp.Status.LastStateChange.AsTime()) + consumeAssertEvent(t, auditEventsSink.C(), auditEventFor(userTaskName, "update", "RESOLVED", "RESOLVED")) + + // Upserting one of the instances. + expectedLastStateChange = updateUserTaskResp.Status.GetLastStateChange() + fakeClock.Advance(1 * time.Minute) + ut1.Spec.DiscoverEc2.Instances["i-345"] = &usertasksv1.DiscoverEC2Instance{ + InstanceId: "i-345", + DiscoveryConfig: "dc01", + DiscoveryGroup: "dg01", + SyncTime: timestamppb.New(fakeClock.Now()), + } + upsertUserTaskResp, err = service.UpsertUserTask(ctx, &usertasksv1.UpsertUserTaskRequest{UserTask: ut1}) + require.NoError(t, err) + // Does not change the LastStateChange + require.Equal(t, expectedLastStateChange.AsTime(), upsertUserTaskResp.Status.LastStateChange.AsTime()) + consumeAssertEvent(t, auditEventsSink.C(), auditEventFor(userTaskName, "update", "RESOLVED", "RESOLVED")) + _, err = service.DeleteUserTask(ctx, &usertasksv1.DeleteUserTaskRequest{Name: userTaskName}) require.NoError(t, err) // No usage report for deleted resources. 
diff --git a/lib/web/usertasks_test.go b/lib/web/usertasks_test.go index 0bb2dbb9a9f9a..13e9723458090 100644 --- a/lib/web/usertasks_test.go +++ b/lib/web/usertasks_test.go @@ -31,6 +31,7 @@ import ( usertasksv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/usertasks/v1" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/usertasks" + "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/web/ui" ) @@ -53,6 +54,8 @@ func TestUserTask(t *testing.T) { }) require.NoError(t, err) pack := env.proxies[0].authPack(t, userWithRW, []types.Role{roleRWUserTask}) + adminClient, err := env.server.NewClient(auth.TestAdmin()) + require.NoError(t, err) getAllEndpoint := pack.clt.Endpoint("webapi", "sites", clusterName, "usertask") singleItemEndpoint := func(name string) string { @@ -90,7 +93,7 @@ func TestUserTask(t *testing.T) { }) require.NoError(t, err) - _, err = env.proxies[0].auth.Auth().CreateUserTask(ctx, userTask) + _, err = adminClient.UserTasksServiceClient().CreateUserTask(ctx, userTask) require.NoError(t, err) userTaskForTest = userTask } From 4a10f05a96d33001bb54e90a38e15905957b0117 Mon Sep 17 00:00:00 2001 From: Noah Stride Date: Fri, 10 Jan 2025 13:25:02 +0000 Subject: [PATCH 39/45] Update protoc-gen-terraform to v3.0.2 (#50943) --- .github/workflows/lint.yaml | 2 +- integrations/terraform/Makefile | 2 +- integrations/terraform/README.md | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 0cb29695f968b..ff6cdd143e7e1 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -235,7 +235,7 @@ jobs: - name: Check if Terraform resources are up to date # We have to add the current directory as a safe directory or else git commands will not work as expected. 
# The protoc-gen-terraform version must match the version in integrations/terraform/Makefile - run: git config --global --add safe.directory $(realpath .) && go install github.com/gravitational/protoc-gen-terraform@c91cc3ef4d7d0046c36cb96b1cd337e466c61225 && make terraform-resources-up-to-date + run: git config --global --add safe.directory $(realpath .) && go install github.com/gravitational/protoc-gen-terraform/v3@v3.0.2 && make terraform-resources-up-to-date lint-rfd: name: Lint (RFD) diff --git a/integrations/terraform/Makefile b/integrations/terraform/Makefile index 572a07d4d45dc..149aef0ed5b4b 100644 --- a/integrations/terraform/Makefile +++ b/integrations/terraform/Makefile @@ -47,7 +47,7 @@ $(BUILDDIR)/terraform-provider-teleport_%: terraform-provider-teleport-v$(VERSIO CUSTOM_IMPORTS_TMP_DIR ?= /tmp/protoc-gen-terraform/custom-imports # This version must match the version installed by .github/workflows/lint.yaml -PROTOC_GEN_TERRAFORM_VERSION ?= v3.0.0 +PROTOC_GEN_TERRAFORM_VERSION ?= v3.0.2 PROTOC_GEN_TERRAFORM_EXISTS := $(shell $(PROTOC_GEN_TERRAFORM) version 2>&1 >/dev/null | grep 'protoc-gen-terraform $(PROTOC_GEN_TERRAFORM_VERSION)') .PHONY: gen-tfschema diff --git a/integrations/terraform/README.md b/integrations/terraform/README.md index 53e752f725d41..dde74bc7b793b 100644 --- a/integrations/terraform/README.md +++ b/integrations/terraform/README.md @@ -7,9 +7,9 @@ Please, refer to [official documentation](https://goteleport.com/docs/admin-guid ## Development 1. Install [`protobuf`](https://grpc.io/docs/protoc-installation/). -2. Install [`protoc-gen-terraform`](https://github.com/gravitational/protoc-gen-terraform) @v3.0.0. +2. Install [`protoc-gen-terraform`](https://github.com/gravitational/protoc-gen-terraform) @v3.0.2. - ```go install github.com/gravitational/protoc-gen-terraform@c91cc3ef4d7d0046c36cb96b1cd337e466c61225``` + ```go install github.com/gravitational/protoc-gen-terraform/v3@v3.0.2``` 3. 
Install [`Terraform`](https://learn.hashicorp.com/tutorials/terraform/install-cli) v1.1.0+. Alternatively, you can use [`tfenv`](https://github.com/tfutils/tfenv). Please note that on Mac M1 you need to specify `TFENV_ARCH` (ex: `TFENV_ARCH=arm64 tfenv install 1.1.6`). From 5b5bab980ea6c6fb21b8b552045f2a5612b8f867 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Fri, 10 Jan 2025 10:43:54 -0500 Subject: [PATCH 40/45] Use a non-global metrics registry in Teleport (#50913) * Support a non-global registry in Teleport * lint * Update lib/service/service.go Co-authored-by: rosstimothy <39066650+rosstimothy@users.noreply.github.com> --------- Co-authored-by: rosstimothy <39066650+rosstimothy@users.noreply.github.com> --- lib/service/service.go | 48 +++++++++++++++++- lib/service/service_test.go | 86 +++++++++++++++++++++++++++++++- lib/service/servicecfg/config.go | 11 ++++ 3 files changed, 142 insertions(+), 3 deletions(-) diff --git a/lib/service/service.go b/lib/service/service.go index 7638ee5e85caf..7fd997e7234f0 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -54,6 +54,7 @@ import ( "github.com/gravitational/roundtrip" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/quic-go/quic-go" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -657,6 +658,15 @@ type TeleportProcess struct { // resolver is used to identify the reverse tunnel address when connecting via // the proxy. resolver reversetunnelclient.Resolver + + // metricsRegistry is the prometheus metric registry for the process. + // Every teleport service that wants to register metrics should use this + // instead of the global prometheus.DefaultRegisterer to avoid registration + // conflicts. + // + // Both the metricsRegistry and the default global registry are gathered by + // Teleport's metric service. 
+ metricsRegistry *prometheus.Registry } // processIndex is an internal process index @@ -1179,6 +1189,7 @@ func NewTeleport(cfg *servicecfg.Config) (*TeleportProcess, error) { logger: cfg.Logger, cloudLabels: cloudLabels, TracingProvider: tracing.NoopProvider(), + metricsRegistry: cfg.MetricsRegistry, } process.registerExpectedServices(cfg) @@ -3405,11 +3416,46 @@ func (process *TeleportProcess) initUploaderService() error { return nil } +// promHTTPLogAdapter adapts a slog.Logger into a promhttp.Logger. +type promHTTPLogAdapter struct { + ctx context.Context + *slog.Logger +} + +// Println implements the promhttp.Logger interface. +func (l promHTTPLogAdapter) Println(v ...interface{}) { + //nolint:sloglint // msg cannot be constant + l.ErrorContext(l.ctx, fmt.Sprint(v...)) +} + // initMetricsService starts the metrics service currently serving metrics for // prometheus consumption func (process *TeleportProcess) initMetricsService() error { mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.Handler()) + + // We gather metrics both from the in-process registry (preferred metrics registration method) + // and the global registry (used by some Teleport services and many dependencies). + gatherers := prometheus.Gatherers{ + process.metricsRegistry, + prometheus.DefaultGatherer, + } + + metricsHandler := promhttp.InstrumentMetricHandler( + process.metricsRegistry, promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{ + // Errors can happen if metrics are registered with identical names in both the local and the global registry. + // In this case, we log the error but continue collecting metrics. The first collected metric will win + // (the one from the local metrics registry takes precedence). + // As we move more things to the local registry, especially in other tools like tbot, we will have less + // conflicts in tests. 
+ ErrorHandling: promhttp.ContinueOnError, + ErrorLog: promHTTPLogAdapter{ + ctx: process.ExitContext(), + Logger: process.logger.With(teleport.ComponentKey, teleport.ComponentMetrics), + }, + }), + ) + + mux.Handle("/metrics", metricsHandler) logger := process.logger.With(teleport.ComponentKey, teleport.Component(teleport.ComponentMetrics, process.id)) diff --git a/lib/service/service_test.go b/lib/service/service_test.go index 52e59387ff580..4c08a87689145 100644 --- a/lib/service/service_test.go +++ b/lib/service/service_test.go @@ -23,9 +23,11 @@ import ( "crypto/tls" "errors" "fmt" + "io" "log/slog" "net" "net/http" + "net/url" "os" "path/filepath" "strings" @@ -39,6 +41,8 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" "google.golang.org/grpc" @@ -1887,7 +1891,7 @@ func TestAgentRolloutController(t *testing.T) { dataDir := makeTempDir(t) cfg := servicecfg.MakeDefaultConfig() - // We use a real clock because too many sevrices are using the clock and it's not possible to accurately wait for + // We use a real clock because too many services are using the clock and it's not possible to accurately wait for // each one of them to reach the point where they wait for the clock to advance. If we add a WaitUntil(X waiters) // check, this will break the next time we add a new waiter. 
cfg.Clock = clockwork.NewRealClock() @@ -1906,7 +1910,7 @@ func TestAgentRolloutController(t *testing.T) { process, err := NewTeleport(cfg) require.NoError(t, err) - // Test setup: start the Teleport auth and wait for it to beocme ready + // Test setup: start the Teleport auth and wait for it to become ready require.NoError(t, process.Start()) // Test setup: wait for every service to start @@ -1949,6 +1953,84 @@ func TestAgentRolloutController(t *testing.T) { }, 5*time.Second, 10*time.Millisecond) } +func TestMetricsService(t *testing.T) { + t.Parallel() + // Test setup: create a listener for the metrics server, get its file descriptor. + + // Note: this code is copied from integrations/helpers/NewListenerOn() to avoid including helpers in a production + // build and avoid a cyclic dependency. + metricsListener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, metricsListener.Close()) + }) + require.IsType(t, &net.TCPListener{}, metricsListener) + metricsListenerFile, err := metricsListener.(*net.TCPListener).File() + require.NoError(t, err) + + // Test setup: create a new teleport process + dataDir := makeTempDir(t) + cfg := servicecfg.MakeDefaultConfig() + cfg.DataDir = dataDir + cfg.SetAuthServerAddress(utils.NetAddr{AddrNetwork: "tcp", Addr: "127.0.0.1:0"}) + cfg.Auth.Enabled = true + cfg.Proxy.Enabled = false + cfg.SSH.Enabled = false + cfg.DebugService.Enabled = false + cfg.Auth.StorageConfig.Params["path"] = dataDir + cfg.Auth.ListenAddr = utils.NetAddr{AddrNetwork: "tcp", Addr: "127.0.0.1:0"} + cfg.Metrics.Enabled = true + + // Configure the metrics server to use the listener we previously created. 
+ cfg.Metrics.ListenAddr = &utils.NetAddr{AddrNetwork: "tcp", Addr: metricsListener.Addr().String()} + cfg.FileDescriptors = []*servicecfg.FileDescriptor{ + {Type: string(ListenerMetrics), Address: metricsListener.Addr().String(), File: metricsListenerFile}, + } + + // Create and start the Teleport service. + process, err := NewTeleport(cfg) + require.NoError(t, err) + require.NoError(t, process.Start()) + t.Cleanup(func() { + assert.NoError(t, process.Close()) + assert.NoError(t, process.Wait()) + }) + + // Test setup: create our test metrics. + nonce := strings.ReplaceAll(uuid.NewString(), "-", "") + localMetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "test", + Name: "local_metric_" + nonce, + }) + globalMetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "test", + Name: "global_metric_" + nonce, + }) + require.NoError(t, process.metricsRegistry.Register(localMetric)) + require.NoError(t, prometheus.Register(globalMetric)) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + t.Cleanup(cancel) + _, err = process.WaitForEvent(ctx, MetricsReady) + require.NoError(t, err) + + // Test execution: get metrics and check the tests metrics are here. + metricsURL, err := url.Parse("http://" + metricsListener.Addr().String()) + require.NoError(t, err) + metricsURL.Path = "/metrics" + resp, err := http.Get(metricsURL.String()) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + + // Test validation: check that the metrics server served both the local and global registry. + require.Contains(t, string(body), "local_metric_"+nonce) + require.Contains(t, string(body), "global_metric_"+nonce) +} + // makeTempDir makes a temp dir with a shorter name than t.TempDir() in order to // avoid https://github.com/golang/go/issues/62614. 
func makeTempDir(t *testing.T) string { diff --git a/lib/service/servicecfg/config.go b/lib/service/servicecfg/config.go index a89e79a8f6302..a89e29a2c7b54 100644 --- a/lib/service/servicecfg/config.go +++ b/lib/service/servicecfg/config.go @@ -34,6 +34,7 @@ import ( "github.com/ghodss/yaml" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" "golang.org/x/crypto/ssh" "github.com/gravitational/teleport" @@ -264,6 +265,12 @@ type Config struct { // protocol. DatabaseREPLRegistry dbrepl.REPLRegistry + // MetricsRegistry is the prometheus metrics registry used by the Teleport process to register its metrics. + // As of today, not every Teleport metric is registered against this registry. Some Teleport services + // and Teleport dependencies are using the global registry. + // Both the MetricsRegistry and the default global registry are gathered by Teleport's metric service. + MetricsRegistry *prometheus.Registry + // token is either the token needed to join the auth server, or a path pointing to a file // that contains the token // @@ -520,6 +527,10 @@ func ApplyDefaults(cfg *Config) { cfg.LoggerLevel = new(slog.LevelVar) } + if cfg.MetricsRegistry == nil { + cfg.MetricsRegistry = prometheus.NewRegistry() + } + // Remove insecure and (borderline insecure) cryptographic primitives from // default configuration. These can still be added back in file configuration by // users, but not supported by default by Teleport. 
See #1856 for more From cca83feb66e777965f64162eb3f7bd7db34e4c3a Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Fri, 10 Jan 2025 11:08:42 -0500 Subject: [PATCH 41/45] Convert integrations to use slog (#50921) --- integrations/access/accesslist/app.go | 49 +++-- .../access_monitoring_rules.go | 14 +- integrations/access/accessrequest/app.go | 76 ++++--- integrations/access/common/app.go | 10 +- .../access/common/auth/token_provider.go | 23 ++- .../access/common/auth/token_provider_test.go | 7 +- integrations/access/datadog/bot.go | 2 +- integrations/access/datadog/client.go | 2 +- .../datadog/cmd/teleport-datadog/main.go | 13 +- .../access/datadog/testlib/fake_datadog.go | 3 +- integrations/access/discord/bot.go | 3 +- .../discord/cmd/teleport-discord/main.go | 13 +- .../access/discord/testlib/fake_discord.go | 3 +- integrations/access/email/app.go | 70 ++++--- integrations/access/email/client.go | 12 +- .../access/email/cmd/teleport-email/main.go | 15 +- integrations/access/email/mailers.go | 6 +- .../access/email/testlib/mock_mailgun.go | 4 +- integrations/access/jira/app.go | 109 +++++----- integrations/access/jira/client.go | 15 +- .../access/jira/cmd/teleport-jira/main.go | 13 +- integrations/access/jira/testlib/fake_jira.go | 3 +- integrations/access/jira/testlib/suite.go | 2 +- integrations/access/jira/webhook_server.go | 14 +- integrations/access/mattermost/bot.go | 20 +- .../cmd/teleport-mattermost/main.go | 13 +- .../mattermost/testlib/fake_mattermost.go | 3 +- integrations/access/msteams/app.go | 7 +- integrations/access/msteams/bot.go | 3 +- .../msteams/cmd/teleport-msteams/main.go | 3 +- .../access/msteams/testlib/fake_msteams.go | 3 +- integrations/access/msteams/uninstall.go | 17 +- integrations/access/msteams/validate.go | 7 +- integrations/access/opsgenie/app.go | 64 +++--- integrations/access/opsgenie/client.go | 10 +- .../access/opsgenie/testlib/fake_opsgenie.go | 3 +- 
integrations/access/pagerduty/app.go | 83 ++++---- integrations/access/pagerduty/client.go | 12 +- .../pagerduty/cmd/teleport-pagerduty/main.go | 13 +- .../pagerduty/testlib/fake_pagerduty.go | 3 +- integrations/access/servicenow/app.go | 78 ++++--- integrations/access/servicenow/client.go | 5 +- .../servicenow/testlib/fake_servicenow.go | 3 +- integrations/access/slack/bot.go | 6 +- .../access/slack/cmd/teleport-slack/main.go | 13 +- .../access/slack/testlib/fake_slack.go | 3 +- .../event-handler/fake_fluentd_test.go | 3 - integrations/event-handler/main.go | 25 +-- integrations/lib/bail.go | 8 +- integrations/lib/config.go | 10 +- integrations/lib/embeddedtbot/bot.go | 11 +- integrations/lib/http.go | 14 +- integrations/lib/logger/logger.go | 190 +++--------------- integrations/lib/signals.go | 7 +- integrations/lib/tctl/tctl.go | 24 ++- integrations/lib/testing/integration/suite.go | 4 +- integrations/lib/watcherjob/watcherjob.go | 16 +- .../crdgen/cmd/protoc-gen-crd-docs/debug.go | 27 ++- .../crdgen/cmd/protoc-gen-crd-docs/main.go | 14 +- .../crdgen/cmd/protoc-gen-crd/debug.go | 27 ++- .../crdgen/cmd/protoc-gen-crd/main.go | 14 +- integrations/terraform/go.mod | 2 +- integrations/terraform/provider/errors.go | 6 +- integrations/terraform/provider/provider.go | 43 ++-- lib/utils/log/log.go | 3 + 65 files changed, 651 insertions(+), 650 deletions(-) diff --git a/integrations/access/accesslist/app.go b/integrations/access/accesslist/app.go index 02f933baf5ecd..ba40de3abf575 100644 --- a/integrations/access/accesslist/app.go +++ b/integrations/access/accesslist/app.go @@ -33,6 +33,7 @@ import ( "github.com/gravitational/teleport/integrations/lib" "github.com/gravitational/teleport/integrations/lib/logger" pd "github.com/gravitational/teleport/integrations/lib/plugindata" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -118,7 +119,7 @@ func (a *App) run(ctx context.Context) error { log := logger.Get(ctx) - log.Info("Access list monitor is 
running") + log.InfoContext(ctx, "Access list monitor is running") a.job.SetReady(true) @@ -134,7 +135,7 @@ func (a *App) run(ctx context.Context) error { } timer.Reset(jitter(reminderInterval)) case <-ctx.Done(): - log.Info("Access list monitor is finished") + log.InfoContext(ctx, "Access list monitor is finished") return nil } } @@ -146,7 +147,7 @@ func (a *App) run(ctx context.Context) error { func (a *App) remindIfNecessary(ctx context.Context) error { log := logger.Get(ctx) - log.Info("Looking for Access List Review reminders") + log.InfoContext(ctx, "Looking for Access List Review reminders") var nextToken string var err error @@ -156,13 +157,14 @@ func (a *App) remindIfNecessary(ctx context.Context) error { accessLists, nextToken, err = a.apiClient.ListAccessLists(ctx, 0 /* default page size */, nextToken) if err != nil { if trace.IsNotImplemented(err) { - log.Errorf("access list endpoint is not implemented on this auth server, so the access list app is ceasing to run.") + log.ErrorContext(ctx, "access list endpoint is not implemented on this auth server, so the access list app is ceasing to run") return trace.Wrap(err) } else if trace.IsAccessDenied(err) { - log.Warnf("Slack bot does not have permissions to list access lists. Please add access_list read and list permissions " + - "to the role associated with the Slack bot.") + const msg = "Slack bot does not have permissions to list access lists. Please add access_list read and list permissions " + + "to the role associated with the Slack bot." 
+ log.WarnContext(ctx, msg) } else { - log.Errorf("error listing access lists: %v", err) + log.ErrorContext(ctx, "error listing access lists", "error", err) } break } @@ -170,7 +172,10 @@ func (a *App) remindIfNecessary(ctx context.Context) error { for _, accessList := range accessLists { recipients, err := a.getRecipientsRequiringReminders(ctx, accessList) if err != nil { - log.WithError(err).Warnf("Error getting recipients to notify for review due for access list %q", accessList.Spec.Title) + log.WarnContext(ctx, "Error getting recipients to notify for review due for access list", + "error", err, + "access_list", accessList.Spec.Title, + ) continue } @@ -195,7 +200,7 @@ func (a *App) remindIfNecessary(ctx context.Context) error { } if len(errs) > 0 { - log.WithError(trace.NewAggregate(errs...)).Warn("Error notifying for access list reviews") + log.WarnContext(ctx, "Error notifying for access list reviews", "error", trace.NewAggregate(errs...)) } return nil @@ -213,7 +218,10 @@ func (a *App) getRecipientsRequiringReminders(ctx context.Context, accessList *a // If the current time before the notification start time, skip notifications. if now.Before(notificationStart) { - log.Debugf("Access list %s is not ready for notifications, notifications start at %s", accessList.GetName(), notificationStart.Format(time.RFC3339)) + log.DebugContext(ctx, "Access list is not ready for notifications", + "access_list", accessList.GetName(), + "notification_start_time", notificationStart.Format(time.RFC3339), + ) return nil, nil } @@ -255,12 +263,17 @@ func (a *App) fetchRecipients(ctx context.Context, accessList *accesslist.Access if err != nil { // TODO(kiosion): Remove in v18; protecting against server not having `GetAccessListOwners` func. 
if trace.IsNotImplemented(err) { - log.WithError(err).Warnf("Error getting nested owners for access list '%v', continuing with only explicit owners", accessList.GetName()) + log.WarnContext(ctx, "Error getting nested owners for access list, continuing with only explicit owners", + "error", err, + "access_list", accessList.GetName(), + ) for _, owner := range accessList.Spec.Owners { allOwners = append(allOwners, &owner) } } else { - log.WithError(err).Errorf("Error getting owners for access list '%v'", accessList.GetName()) + log.ErrorContext(ctx, "Error getting owners for access list", + "error", err, + "access_list", accessList.GetName()) } } @@ -270,7 +283,7 @@ func (a *App) fetchRecipients(ctx context.Context, accessList *accesslist.Access for _, owner := range allOwners { recipient, err := a.bot.FetchRecipient(ctx, owner.Name) if err != nil { - log.Debugf("error getting recipient %s", owner.Name) + log.DebugContext(ctx, "error getting recipient", "recipient", owner.Name) continue } allRecipients[owner.Name] = *recipient @@ -293,7 +306,10 @@ func (a *App) updatePluginDataAndGetRecipientsRequiringReminders(ctx context.Con // Calculate days from start. daysFromStart := now.Sub(notificationStart) / oneDay windowStart = notificationStart.Add(daysFromStart * oneDay) - log.Infof("windowStart: %s, now: %s", windowStart.String(), now.String()) + log.InfoContext(ctx, "calculating window start", + "window_start", logutils.StringerAttr(windowStart), + "now", logutils.StringerAttr(now), + ) } recipients := []common.Recipient{} @@ -304,7 +320,10 @@ func (a *App) updatePluginDataAndGetRecipientsRequiringReminders(ctx context.Con // If the notification window is before the last notification date, then this user doesn't need a notification. 
if !windowStart.After(lastNotification) { - log.Debugf("User %s has already been notified for access list %s", recipient.Name, accessList.GetName()) + log.DebugContext(ctx, "User has already been notified for access list", + "user", recipient.Name, + "access_list", accessList.GetName(), + ) userNotifications[recipient.Name] = lastNotification continue } diff --git a/integrations/access/accessmonitoring/access_monitoring_rules.go b/integrations/access/accessmonitoring/access_monitoring_rules.go index 3dea9ea2bf543..82c91413bff96 100644 --- a/integrations/access/accessmonitoring/access_monitoring_rules.go +++ b/integrations/access/accessmonitoring/access_monitoring_rules.go @@ -151,8 +151,10 @@ func (amrh *RuleHandler) RecipientsFromAccessMonitoringRules(ctx context.Context for _, rule := range amrh.getAccessMonitoringRules() { match, err := MatchAccessRequest(rule.Spec.Condition, req) if err != nil { - log.WithError(err).WithField("rule", rule.Metadata.Name). - Warn("Failed to parse access monitoring notification rule") + log.WarnContext(ctx, "Failed to parse access monitoring notification rule", + "error", err, + "rule", rule.Metadata.Name, + ) } if !match { continue @@ -160,7 +162,7 @@ func (amrh *RuleHandler) RecipientsFromAccessMonitoringRules(ctx context.Context for _, recipient := range rule.Spec.Notification.Recipients { rec, err := amrh.fetchRecipientCallback(ctx, recipient) if err != nil { - log.WithError(err).Warn("Failed to fetch plugin recipients based on Access monitoring rule recipients") + log.WarnContext(ctx, "Failed to fetch plugin recipients based on Access monitoring rule recipients", "error", err) continue } recipientSet.Add(*rec) @@ -176,8 +178,10 @@ func (amrh *RuleHandler) RawRecipientsFromAccessMonitoringRules(ctx context.Cont for _, rule := range amrh.getAccessMonitoringRules() { match, err := MatchAccessRequest(rule.Spec.Condition, req) if err != nil { - log.WithError(err).WithField("rule", rule.Metadata.Name). 
- Warn("Failed to parse access monitoring notification rule") + log.WarnContext(ctx, "Failed to parse access monitoring notification rule", + "error", err, + "rule", rule.Metadata.Name, + ) } if !match { continue diff --git a/integrations/access/accessrequest/app.go b/integrations/access/accessrequest/app.go index 8a5effc73dabd..17182ec3dc8ee 100644 --- a/integrations/access/accessrequest/app.go +++ b/integrations/access/accessrequest/app.go @@ -21,6 +21,7 @@ package accessrequest import ( "context" "fmt" + "log/slog" "slices" "strings" "time" @@ -36,6 +37,7 @@ import ( "github.com/gravitational/teleport/integrations/lib/logger" pd "github.com/gravitational/teleport/integrations/lib/plugindata" "github.com/gravitational/teleport/integrations/lib/watcherjob" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -189,16 +191,16 @@ func (a *App) onWatcherEvent(ctx context.Context, event types.Event) error { func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error { op := event.Type reqID := event.Resource.GetName() - ctx, _ = logger.WithField(ctx, "request_id", reqID) + ctx, _ = logger.With(ctx, "request_id", reqID) switch op { case types.OpPut: - ctx, _ = logger.WithField(ctx, "request_op", "put") + ctx, _ = logger.With(ctx, "request_op", "put") req, ok := event.Resource.(types.AccessRequest) if !ok { return trace.BadParameter("unexpected resource type %T", event.Resource) } - ctx, log := logger.WithField(ctx, "request_state", req.GetState().String()) + ctx, log := logger.With(ctx, "request_state", req.GetState().String()) var err error switch { @@ -207,21 +209,29 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error case req.GetState().IsResolved(): err = a.onResolvedRequest(ctx, req) default: - log.WithField("event", event).Warn("Unknown request state") + log.WarnContext(ctx, "Unknown request state", + slog.Group("event", + slog.Any("type", logutils.StringerAttr(event.Type)), + 
slog.Group("resource", + "kind", event.Resource.GetKind(), + "name", event.Resource.GetName(), + ), + ), + ) return nil } if err != nil { - log.WithError(err).Errorf("Failed to process request") + log.ErrorContext(ctx, "Failed to process request", "error", err) return trace.Wrap(err) } return nil case types.OpDelete: - ctx, log := logger.WithField(ctx, "request_op", "delete") + ctx, log := logger.With(ctx, "request_op", "delete") if err := a.onDeletedRequest(ctx, reqID); err != nil { - log.WithError(err).Errorf("Failed to process deleted request") + log.ErrorContext(ctx, "Failed to process deleted request", "error", err) return trace.Wrap(err) } return nil @@ -242,7 +252,7 @@ func (a *App) onPendingRequest(ctx context.Context, req types.AccessRequest) err loginsByRole, err := a.getLoginsByRole(ctx, req) if trace.IsAccessDenied(err) { - log.Warnf("Missing permissions to get logins by role. Please add role.read to the associated role. error: %s", err) + log.WarnContext(ctx, "Missing permissions to get logins by role, please add role.read to the associated role", "error", err) } else if err != nil { return trace.Wrap(err) } @@ -265,12 +275,12 @@ func (a *App) onPendingRequest(ctx context.Context, req types.AccessRequest) err return trace.Wrap(err) } } else { - log.Warning("No channel to post") + log.WarnContext(ctx, "No channel to post") } // Try to approve the request if user is currently on-call. 
if err := a.tryApproveRequest(ctx, reqID, req); err != nil { - log.Warningf("Failed to auto approve request: %v", err) + log.WarnContext(ctx, "Failed to auto approve request", "error", err) } case trace.IsAlreadyExists(err): // The messages were already sent, nothing to do, we can update the reviews @@ -311,7 +321,7 @@ func (a *App) onResolvedRequest(ctx context.Context, req types.AccessRequest) er case types.RequestState_PROMOTED: tag = pd.ResolvedPromoted default: - logger.Get(ctx).Warningf("Unknown state %v (%s)", state, state.String()) + logger.Get(ctx).WarnContext(ctx, "Unknown state", "state", logutils.StringerAttr(state)) return replyErr } err := trace.Wrap(a.updateMessages(ctx, req.GetName(), tag, reason, req.GetReviews())) @@ -330,13 +340,13 @@ func (a *App) broadcastAccessRequestMessages(ctx context.Context, recipients []c return trace.Wrap(err) } for _, data := range sentMessages { - logger.Get(ctx).WithFields(logger.Fields{ - "channel_id": data.ChannelID, - "message_id": data.MessageID, - }).Info("Successfully posted messages") + logger.Get(ctx).InfoContext(ctx, "Successfully posted messages", + "channel_id", data.ChannelID, + "message_id", data.MessageID, + ) } if err != nil { - logger.Get(ctx).WithError(err).Error("Failed to post one or more messages") + logger.Get(ctx).ErrorContext(ctx, "Failed to post one or more messages", "error", err) } _, err = a.pluginData.Update(ctx, reqID, func(existing PluginData) (PluginData, error) { @@ -369,7 +379,7 @@ func (a *App) postReviewReplies(ctx context.Context, reqID string, reqReviews [] return existing, nil }) if trace.IsAlreadyExists(err) { - logger.Get(ctx).Debug("Failed to post reply: replies are already sent") + logger.Get(ctx).DebugContext(ctx, "Failed to post reply: replies are already sent") return nil } if err != nil { @@ -383,7 +393,7 @@ func (a *App) postReviewReplies(ctx context.Context, reqID string, reqReviews [] errors := make([]error, 0, len(slice)) for _, data := range pd.SentMessages { - ctx, 
_ = logger.WithFields(ctx, logger.Fields{"channel_id": data.ChannelID, "message_id": data.MessageID}) + ctx, _ = logger.With(ctx, "channel_id", data.ChannelID, "message_id", data.MessageID) for _, review := range slice { if err := a.bot.PostReviewReply(ctx, data.ChannelID, data.MessageID, review); err != nil { errors = append(errors, err) @@ -425,7 +435,7 @@ func (a *App) getMessageRecipients(ctx context.Context, req types.AccessRequest) for _, recipient := range recipients { rec, err := a.bot.FetchRecipient(ctx, recipient) if err != nil { - log.Warningf("Failed to fetch Opsgenie recipient: %v", err) + log.WarnContext(ctx, "Failed to fetch Opsgenie recipient", "error", err) continue } recipientSet.Add(*rec) @@ -436,7 +446,7 @@ func (a *App) getMessageRecipients(ctx context.Context, req types.AccessRequest) validEmailSuggReviewers := []string{} for _, reviewer := range req.GetSuggestedReviewers() { if !lib.IsEmail(reviewer) { - log.Warningf("Failed to notify a suggested reviewer: %q does not look like a valid email", reviewer) + log.WarnContext(ctx, "Failed to notify a suggested reviewer with an invalid email address", "reviewer", reviewer) continue } @@ -446,7 +456,7 @@ func (a *App) getMessageRecipients(ctx context.Context, req types.AccessRequest) for _, rawRecipient := range rawRecipients { recipient, err := a.bot.FetchRecipient(ctx, rawRecipient) if err != nil { - log.WithError(err).Warn("Failure when fetching recipient, continuing anyway") + log.WarnContext(ctx, "Failure when fetching recipient, continuing anyway", "error", err) } else { recipientSet.Add(*recipient) } @@ -476,7 +486,7 @@ func (a *App) updateMessages(ctx context.Context, reqID string, tag pd.Resolutio return existing, nil }) if trace.IsNotFound(err) { - log.Debug("Failed to update messages: plugin data is missing") + log.DebugContext(ctx, "Failed to update messages: plugin data is missing") return nil } if trace.IsAlreadyExists(err) { @@ -485,7 +495,7 @@ func (a *App) updateMessages(ctx 
context.Context, reqID string, tag pd.Resolutio "cannot change the resolution tag of an already resolved request, existing: %s, event: %s", pluginData.ResolutionTag, tag) } - log.Debug("Request is already resolved, ignoring event") + log.DebugContext(ctx, "Request is already resolved, ignoring event") return nil } if err != nil { @@ -496,13 +506,17 @@ func (a *App) updateMessages(ctx context.Context, reqID string, tag pd.Resolutio if err := a.bot.UpdateMessages(ctx, reqID, reqData, sentMessages, reviews); err != nil { return trace.Wrap(err) } - log.Infof("Successfully marked request as %s in all messages", tag) + + log.InfoContext(ctx, "Successfully marked request with resolution in all messages", "resolution", tag) if err := a.bot.NotifyUser(ctx, reqID, reqData); err != nil && !trace.IsNotImplemented(err) { return trace.Wrap(err) } - log.Infof("Successfully notified user %s request marked as %s", reqData.User, tag) + log.InfoContext(ctx, "Successfully notified user", + "user", reqData.User, + "resolution", tag, + ) return nil } @@ -545,13 +559,11 @@ func (a *App) getResourceNames(ctx context.Context, req types.AccessRequest) ([] // tryApproveRequest attempts to automatically approve the access request if the // user is on call for the configured service/team. func (a *App) tryApproveRequest(ctx context.Context, reqID string, req types.AccessRequest) error { - log := logger.Get(ctx). - WithField("req_id", reqID).
- WithField("user", req.GetUser()) + log := logger.Get(ctx).With("req_id", reqID, "user", req.GetUser()) oncallUsers, err := a.bot.FetchOncallUsers(ctx, req) if trace.IsNotImplemented(err) { - log.Debugf("Skipping auto-approval because %q bot does not support automatic approvals.", a.pluginName) + log.DebugContext(ctx, "Skipping auto-approval because bot does not support automatic approvals", "bot", a.pluginName) return nil } if err != nil { @@ -559,7 +571,7 @@ func (a *App) tryApproveRequest(ctx context.Context, reqID string, req types.Acc } if !slices.Contains(oncallUsers, req.GetUser()) { - log.Debug("Skipping approval because user is not on-call.") + log.DebugContext(ctx, "Skipping approval because user is not on-call") return nil } @@ -573,12 +585,12 @@ func (a *App) tryApproveRequest(ctx context.Context, reqID string, req types.Acc }, }); err != nil { if strings.HasSuffix(err.Error(), "has already reviewed this request") { - log.Debug("Request has already been reviewed.") + log.DebugContext(ctx, "Request has already been reviewed") return nil } return trace.Wrap(err) } - log.Info("Successfully submitted a request approval.") + log.InfoContext(ctx, "Successfully submitted a request approval") return nil } diff --git a/integrations/access/common/app.go b/integrations/access/common/app.go index 805c0dde6ef8a..6c174e1422b75 100644 --- a/integrations/access/common/app.go +++ b/integrations/access/common/app.go @@ -88,7 +88,7 @@ func (a *BaseApp) WaitReady(ctx context.Context) (bool, error) { func (a *BaseApp) checkTeleportVersion(ctx context.Context) (proto.PingResponse, error) { log := logger.Get(ctx) - log.Debug("Checking Teleport server version") + log.DebugContext(ctx, "Checking Teleport server version") pong, err := a.APIClient.Ping(ctx) if err != nil { @@ -156,9 +156,9 @@ func (a *BaseApp) run(ctx context.Context) error { a.mainJob.SetReady(allOK) if allOK { - log.Info("Plugin is ready") + log.InfoContext(ctx, "Plugin is ready") } else { - log.Error("Plugin 
is not ready") + log.ErrorContext(ctx, "Plugin is not ready") } for _, app := range a.apps { @@ -203,11 +203,11 @@ func (a *BaseApp) init(ctx context.Context) error { } } - log.Debug("Starting API health check...") + log.DebugContext(ctx, "Starting API health check") if err = a.Bot.CheckHealth(ctx); err != nil { return trace.Wrap(err, "API health check failed") } - log.Debug("API health check finished ok") + log.DebugContext(ctx, "API health check finished ok") return nil } diff --git a/integrations/access/common/auth/token_provider.go b/integrations/access/common/auth/token_provider.go index f4ae33936a709..e0c23b0b36427 100644 --- a/integrations/access/common/auth/token_provider.go +++ b/integrations/access/common/auth/token_provider.go @@ -20,12 +20,12 @@ package auth import ( "context" + "log/slog" "sync" "time" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/common/auth/oauth" "github.com/gravitational/teleport/integrations/access/common/auth/storage" @@ -65,7 +65,7 @@ type RotatedAccessTokenProviderConfig struct { Refresher oauth.Refresher Clock clockwork.Clock - Log *logrus.Entry + Log *slog.Logger } // CheckAndSetDefaults validates a configuration and sets default values @@ -87,7 +87,7 @@ func (c *RotatedAccessTokenProviderConfig) CheckAndSetDefaults() error { c.Clock = clockwork.NewRealClock() } if c.Log == nil { - c.Log = logrus.NewEntry(logrus.StandardLogger()) + c.Log = slog.Default() } return nil } @@ -104,7 +104,7 @@ type RotatedAccessTokenProvider struct { refresher oauth.Refresher clock clockwork.Clock - log logrus.FieldLogger + log *slog.Logger lock sync.RWMutex // protects the below fields creds *storage.Credentials @@ -153,12 +153,12 @@ func (r *RotatedAccessTokenProvider) RefreshLoop(ctx context.Context) { timer := r.clock.NewTimer(interval) defer timer.Stop() - r.log.Infof("Will attempt token refresh in: %s", interval) + r.log.InfoContext(ctx, 
"Starting token refresh loop", "next_refresh", interval) for { select { case <-ctx.Done(): - r.log.Info("Shutting down") + r.log.InfoContext(ctx, "Shutting down") return case <-timer.Chan(): creds, _ := r.store.GetCredentials(ctx) @@ -174,18 +174,21 @@ func (r *RotatedAccessTokenProvider) RefreshLoop(ctx context.Context) { interval := r.getRefreshInterval(creds) timer.Reset(interval) - r.log.Infof("Next refresh in: %s", interval) + r.log.InfoContext(ctx, "Refreshed token", "next_refresh", interval) continue } creds, err := r.refresh(ctx) if err != nil { - r.log.Errorf("Error while refreshing: %s. Will retry after: %s", err, r.retryInterval) + r.log.ErrorContext(ctx, "Error while refreshing token", + "error", err, + "retry_interval", r.retryInterval, + ) timer.Reset(r.retryInterval) } else { err := r.store.PutCredentials(ctx, creds) if err != nil { - r.log.Errorf("Error while storing the refreshed credentials: %s", err) + r.log.ErrorContext(ctx, "Error while storing the refreshed credentials", "error", err) timer.Reset(r.retryInterval) continue } @@ -196,7 +199,7 @@ func (r *RotatedAccessTokenProvider) RefreshLoop(ctx context.Context) { interval := r.getRefreshInterval(creds) timer.Reset(interval) - r.log.Infof("Successfully refreshed credentials. 
Next refresh in: %s", interval) + r.log.InfoContext(ctx, "Successfully refreshed credentials", "next_refresh", interval) } } } diff --git a/integrations/access/common/auth/token_provider_test.go b/integrations/access/common/auth/token_provider_test.go index fca79776ba024..e4f02ec3d3ae5 100644 --- a/integrations/access/common/auth/token_provider_test.go +++ b/integrations/access/common/auth/token_provider_test.go @@ -20,12 +20,12 @@ package auth import ( "context" + "log/slog" "testing" "time" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/integrations/access/common/auth/oauth" @@ -57,9 +57,6 @@ func (s *mockStore) PutCredentials(ctx context.Context, creds *storage.Credentia } func TestRotatedAccessTokenProvider(t *testing.T) { - log := logrus.New() - log.Level = logrus.DebugLevel - newProvider := func(ctx context.Context, store storage.Store, refresher oauth.Refresher, clock clockwork.Clock, initialCreds *storage.Credentials) *RotatedAccessTokenProvider { return &RotatedAccessTokenProvider{ store: store, @@ -70,7 +67,7 @@ func TestRotatedAccessTokenProvider(t *testing.T) { tokenBufferInterval: 1 * time.Hour, creds: initialCreds, - log: log, + log: slog.Default(), } } diff --git a/integrations/access/datadog/bot.go b/integrations/access/datadog/bot.go index e92dbbb524a20..4e1f52a6c218d 100644 --- a/integrations/access/datadog/bot.go +++ b/integrations/access/datadog/bot.go @@ -162,7 +162,7 @@ func (b Bot) FetchOncallUsers(ctx context.Context, req types.AccessRequest) ([]s annotationKey := types.TeleportNamespace + types.ReqAnnotationApproveSchedulesLabel teamNames, err := common.GetNamesFromAnnotations(req, annotationKey) if err != nil { - log.Debug("Automatic approvals annotation is empty or unspecified.") + log.DebugContext(ctx, "Automatic approvals annotation is empty or unspecified") return nil, nil } diff --git 
a/integrations/access/datadog/client.go b/integrations/access/datadog/client.go index 489eb0c51a44d..2d4ebf79ea5f2 100644 --- a/integrations/access/datadog/client.go +++ b/integrations/access/datadog/client.go @@ -126,7 +126,7 @@ func onAfterDatadogResponse(sink common.StatusSink) resty.ResponseMiddleware { defer cancel() if err := sink.Emit(ctx, status); err != nil { - log.WithError(err).Errorf("Error while emitting Datadog Incident Management plugin status: %v", err) + log.ErrorContext(ctx, "Error while emitting Datadog Incident Management plugin status", "error", err) } } diff --git a/integrations/access/datadog/cmd/teleport-datadog/main.go b/integrations/access/datadog/cmd/teleport-datadog/main.go index cb9cbd1959771..84a6a14c0955f 100644 --- a/integrations/access/datadog/cmd/teleport-datadog/main.go +++ b/integrations/access/datadog/cmd/teleport-datadog/main.go @@ -22,6 +22,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -67,12 +68,13 @@ func main() { if err := run(*path, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, debug bool) error { + ctx := context.Background() conf, err := datadog.LoadConfig(configPath) if err != nil { return trace.Wrap(err) @@ -86,14 +88,15 @@ func run(configPath string, debug bool) error { return err } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + slog.DebugContext(ctx, "DEBUG logging enabled") } app := datadog.NewDatadogApp(conf) go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access Datadog Incident Management Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access Datadog Incident Management Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return 
trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/datadog/testlib/fake_datadog.go b/integrations/access/datadog/testlib/fake_datadog.go index 64ef2e35b93b7..5cfe8b539f454 100644 --- a/integrations/access/datadog/testlib/fake_datadog.go +++ b/integrations/access/datadog/testlib/fake_datadog.go @@ -32,7 +32,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/datadog" ) @@ -281,6 +280,6 @@ func (d *FakeDatadog) GetOncallTeams() (map[string][]string, bool) { func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/discord/bot.go b/integrations/access/discord/bot.go index ca231bdf83a93..576606998b23c 100644 --- a/integrations/access/discord/bot.go +++ b/integrations/access/discord/bot.go @@ -94,8 +94,7 @@ func emitStatusUpdate(resp *resty.Response, statusSink common.StatusSink) { if err := statusSink.Emit(ctx, status); err != nil { logger.Get(resp.Request.Context()). - WithError(err). 
- Errorf("Error while emitting Discord plugin status: %v", err) + ErrorContext(ctx, "Error while emitting Discord plugin status", "error", err) } } diff --git a/integrations/access/discord/cmd/teleport-discord/main.go b/integrations/access/discord/cmd/teleport-discord/main.go index cd19ce64591b6..f624b407742ba 100644 --- a/integrations/access/discord/cmd/teleport-discord/main.go +++ b/integrations/access/discord/cmd/teleport-discord/main.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -65,12 +66,13 @@ func main() { if err := run(*path, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, debug bool) error { + ctx := context.Background() conf, err := discord.LoadDiscordConfig(configPath) if err != nil { return trace.Wrap(err) @@ -84,14 +86,15 @@ func run(configPath string, debug bool) error { return trace.Wrap(err) } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + slog.DebugContext(ctx, "DEBUG logging enabled") } app := discord.NewApp(conf) go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access Discord Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access Discord Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/discord/testlib/fake_discord.go b/integrations/access/discord/testlib/fake_discord.go index c5a176446be5b..0a059d8ac81e2 100644 --- a/integrations/access/discord/testlib/fake_discord.go +++ b/integrations/access/discord/testlib/fake_discord.go @@ -32,7 +32,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" 
"github.com/gravitational/teleport/integrations/access/discord" ) @@ -188,6 +187,6 @@ func (s *FakeDiscord) CheckMessageUpdateByResponding(ctx context.Context) (disco func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/email/app.go b/integrations/access/email/app.go index 07bb3b558080e..cae9c33ed5315 100644 --- a/integrations/access/email/app.go +++ b/integrations/access/email/app.go @@ -18,6 +18,7 @@ package email import ( "context" + "log/slog" "slices" "time" @@ -32,6 +33,7 @@ import ( "github.com/gravitational/teleport/integrations/lib/logger" "github.com/gravitational/teleport/integrations/lib/watcherjob" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -90,7 +92,6 @@ func (a *App) run(ctx context.Context) error { var err error log := logger.Get(ctx) - log.Infof("Starting Teleport Access Email Plugin") if err = a.init(ctx); err != nil { return trace.Wrap(err) @@ -137,9 +138,9 @@ func (a *App) run(ctx context.Context) error { a.mainJob.SetReady(ok) if ok { - log.Info("Plugin is ready") + log.InfoContext(ctx, "Plugin is ready") } else { - log.Error("Plugin is not ready") + log.ErrorContext(ctx, "Plugin is not ready") } <-watcherJob.Done() @@ -186,24 +187,24 @@ func (a *App) init(ctx context.Context) error { }, }) - log.Debug("Starting client connection health check...") + log.DebugContext(ctx, "Starting client connection health check") if err = a.client.CheckHealth(ctx); err != nil { return trace.Wrap(err, "client connection health check failed") } - log.Debug("Client connection health check finished ok") + log.DebugContext(ctx, "Client connection health check finished ok") return nil } // checkTeleportVersion checks that Teleport version is not lower than required func (a *App) checkTeleportVersion(ctx context.Context) (proto.PingResponse, error) { log 
:= logger.Get(ctx) - log.Debug("Checking Teleport server version") + log.DebugContext(ctx, "Checking Teleport server version") pong, err := a.apiClient.Ping(ctx) if err != nil { if trace.IsNotImplemented(err) { return pong, trace.Wrap(err, "server version must be at least %s", minServerVersion) } - log.Error("Unable to get Teleport server version") + log.ErrorContext(ctx, "Unable to get Teleport server version") return pong, trace.Wrap(err) } err = utils.CheckMinVersion(pong.ServerVersion, minServerVersion) @@ -229,16 +230,16 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error } op := event.Type reqID := event.Resource.GetName() - ctx, _ = logger.WithField(ctx, "request_id", reqID) + ctx, _ = logger.With(ctx, "request_id", reqID) switch op { case types.OpPut: - ctx, _ = logger.WithField(ctx, "request_op", "put") + ctx, _ = logger.With(ctx, "request_op", "put") req, ok := event.Resource.(types.AccessRequest) if !ok { return trace.Errorf("unexpected resource type %T", event.Resource) } - ctx, log := logger.WithField(ctx, "request_state", req.GetState().String()) + ctx, log := logger.With(ctx, "request_state", req.GetState().String()) var err error switch { @@ -249,21 +250,29 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error case req.GetState().IsDenied(): err = a.onResolvedRequest(ctx, req) default: - log.WithField("event", event).Warn("Unknown request state") + log.WarnContext(ctx, "Unknown request state", + slog.Group("event", + slog.Any("type", logutils.StringerAttr(event.Type)), + slog.Group("resource", + "kind", event.Resource.GetKind(), + "name", event.Resource.GetName(), + ), + ), + ) return nil } if err != nil { - log.WithError(err).Errorf("Failed to process request") + log.ErrorContext(ctx, "Failed to process request", "error", err) return trace.Wrap(err) } return nil case types.OpDelete: - ctx, log := logger.WithField(ctx,
"request_op", "delete") + ctx, log := logger.With(ctx, "request_op", "delete") if err := a.onDeletedRequest(ctx, reqID); err != nil { - log.WithError(err).Errorf("Failed to process deleted request") + log.ErrorContext(ctx, "Failed to process deleted request", "error", err) return trace.Wrap(err) } return nil @@ -292,7 +303,7 @@ func (a *App) onPendingRequest(ctx context.Context, req types.AccessRequest) err if isNew { recipients := a.getRecipients(ctx, req) if len(recipients) == 0 { - log.Warning("No recipients to send") + log.WarnContext(ctx, "No recipients to send") return nil } @@ -329,7 +340,7 @@ func (a *App) onResolvedRequest(ctx context.Context, req types.AccessRequest) er case types.RequestState_DENIED: resolution.Tag = ResolvedDenied default: - logger.Get(ctx).Warningf("Unknown state %v (%s)", state, state.String()) + logger.Get(ctx).WarnContext(ctx, "Unknown state", "state", logutils.StringerAttr(state)) return replyErr } err := trace.Wrap(a.sendResolution(ctx, req.GetName(), resolution)) @@ -359,7 +370,7 @@ func (a *App) getRecipients(ctx context.Context, req types.AccessRequest) []comm rawRecipients := a.conf.RoleToRecipients.GetRawRecipientsFor(req.GetRoles(), req.GetSuggestedReviewers()) for _, rawRecipient := range rawRecipients { if !lib.IsEmail(rawRecipient) { - log.Warningf("Failed to notify a reviewer: %q does not look like a valid email", rawRecipient) + log.WarnContext(ctx, "Failed to notify a suggested reviewer with an invalid email address", "reviewer", rawRecipient) continue } recipientSet.Add(common.Recipient{ @@ -382,7 +393,7 @@ func (a *App) sendNewThreads(ctx context.Context, recipients []common.Recipient, logSentThreads(ctx, threadsSent, "new threads") if err != nil { - logger.Get(ctx).WithError(err).Error("Failed send one or more messages") + logger.Get(ctx).ErrorContext(ctx, "Failed send one or more messages", "error", err) } _, err = a.modifyPluginData(ctx, reqID, func(existing *PluginData) (PluginData, bool) { @@ -425,7 +436,7 @@ 
func (a *App) sendReviews(ctx context.Context, reqID string, reqData RequestData return trace.Wrap(err) } if !ok { - logger.Get(ctx).Debug("Failed to post reply: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to post reply: plugin data is missing") return nil } reviews := reqReviews[oldCount:] @@ -439,7 +450,11 @@ func (a *App) sendReviews(ctx context.Context, reqID string, reqData RequestData if err != nil { errors = append(errors, err) } - logger.Get(ctx).Infof("New review for request %v by %v is %v", reqID, review.Author, review.ProposedState.String()) + logger.Get(ctx).InfoContext(ctx, "New review for request", + "request_id", reqID, + "author", review.Author, + "state", logutils.StringerAttr(review.ProposedState), + ) logSentThreads(ctx, threadsSent, "new review") } @@ -473,7 +488,7 @@ func (a *App) sendResolution(ctx context.Context, reqID string, resolution Resol return trace.Wrap(err) } if !ok { - log.Debug("Failed to update messages: plugin data is missing") + log.DebugContext(ctx, "Failed to update messages: plugin data is missing") return nil } @@ -482,7 +497,7 @@ func (a *App) sendResolution(ctx context.Context, reqID string, resolution Resol threadsSent, err := a.client.SendResolution(ctx, threads, reqID, reqData) logSentThreads(ctx, threadsSent, "request resolved") - log.Infof("Marked request as %s and sent emails!", resolution.Tag) + log.InfoContext(ctx, "Marked request with resolution and sent emails", "resolution", resolution.Tag) if err != nil { return trace.Wrap(err) @@ -567,10 +582,11 @@ func (a *App) updatePluginData(ctx context.Context, reqID string, data PluginDat // logSentThreads logs successfully sent emails func logSentThreads(ctx context.Context, threads []EmailThread, kind string) { for _, thread := range threads { - logger.Get(ctx).WithFields(logger.Fields{ - "email": thread.Email, - "timestamp": thread.Timestamp, - "message_id": thread.MessageID, - }).Infof("Successfully sent %v!", kind) + 
logger.Get(ctx).InfoContext(ctx, "Successfully sent", + "email", thread.Email, + "timestamp", thread.Timestamp, + "message_id", thread.MessageID, + "kind", kind, + ) } } diff --git a/integrations/access/email/client.go b/integrations/access/email/client.go index 6ef1d2f04144e..f687f5deb0009 100644 --- a/integrations/access/email/client.go +++ b/integrations/access/email/client.go @@ -61,16 +61,16 @@ func NewClient(ctx context.Context, conf Config, clusterName, webProxyAddr strin if conf.Mailgun != nil { mailer = NewMailgunMailer(*conf.Mailgun, conf.StatusSink, conf.Delivery.Sender, clusterName, conf.RoleToRecipients[types.Wildcard]) - logger.Get(ctx).WithField("domain", conf.Mailgun.Domain).Info("Using Mailgun as email transport") + logger.Get(ctx).InfoContext(ctx, "Using Mailgun as email transport", "domain", conf.Mailgun.Domain) } if conf.SMTP != nil { mailer = NewSMTPMailer(*conf.SMTP, conf.StatusSink, conf.Delivery.Sender, clusterName) - logger.Get(ctx).WithFields(logger.Fields{ - "host": conf.SMTP.Host, - "port": conf.SMTP.Port, - "username": conf.SMTP.Username, - }).Info("Using SMTP as email transport") + logger.Get(ctx).InfoContext(ctx, "Using SMTP as email transport", + "host", conf.SMTP.Host, + "port", conf.SMTP.Port, + "username", conf.SMTP.Username, + ) } return Client{ diff --git a/integrations/access/email/cmd/teleport-email/main.go b/integrations/access/email/cmd/teleport-email/main.go index 840c80da76177..ccaec3acbed36 100644 --- a/integrations/access/email/cmd/teleport-email/main.go +++ b/integrations/access/email/cmd/teleport-email/main.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -65,12 +66,13 @@ func main() { if err := run(*path, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, debug bool) error { + ctx := context.Background() conf, 
err := email.LoadConfig(configPath) if err != nil { return trace.Wrap(err) @@ -84,11 +86,11 @@ func run(configPath string, debug bool) error { return err } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + slog.DebugContext(ctx, "DEBUG logging enabled") } if conf.Delivery.Recipients != nil { - logger.Standard().Warn("The delivery.recipients config option is deprecated, set role_to_recipients[\"*\"] instead for the same functionality") + slog.WarnContext(ctx, "The delivery.recipients config option is deprecated, set role_to_recipients[\"*\"] instead for the same functionality") } app, err := email.NewApp(*conf) @@ -98,8 +100,9 @@ func run(configPath string, debug bool) error { go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access Email Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access Email Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/email/mailers.go b/integrations/access/email/mailers.go index 60d5b4592449f..5cbd3d98bee02 100644 --- a/integrations/access/email/mailers.go +++ b/integrations/access/email/mailers.go @@ -114,7 +114,7 @@ func (m *SMTPMailer) CheckHealth(ctx context.Context) error { return trace.Wrap(err) } if err := client.Close(); err != nil { - log.Debug("Failed to close client connection after health check") + log.DebugContext(ctx, "Failed to close client connection after health check") } return nil } @@ -191,7 +191,7 @@ func (m *SMTPMailer) emitStatus(ctx context.Context, statusErr error) { code = http.StatusInternalServerError } if err := m.sink.Emit(ctx, common.StatusFromStatusCode(code)); err != nil { - log.WithError(err).Error("Error while emitting Email plugin status") + log.ErrorContext(ctx, "Error while emitting Email plugin status", "error", err) } } @@ -252,7 +252,7 @@ func (t 
*statusSinkTransport) RoundTrip(req *http.Request) (*http.Response, erro status := common.StatusFromStatusCode(resp.StatusCode) if err := t.sink.Emit(ctx, status); err != nil { - log.WithError(err).Error("Error while emitting Email plugin status") + log.ErrorContext(ctx, "Error while emitting Email plugin status", "error", err) } } return resp, nil diff --git a/integrations/access/email/testlib/mock_mailgun.go b/integrations/access/email/testlib/mock_mailgun.go index 58cbbc8ebb098..7895a5cdcaefe 100644 --- a/integrations/access/email/testlib/mock_mailgun.go +++ b/integrations/access/email/testlib/mock_mailgun.go @@ -24,7 +24,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" ) const ( @@ -58,7 +57,8 @@ func newMockMailgunServer(concurrency int) *mockMailgunServer { s := httptest.NewUnstartedServer(func(mg *mockMailgunServer) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if err := r.ParseMultipartForm(multipartFormBufSize); err != nil { - log.Error(err) + w.WriteHeader(http.StatusInternalServerError) + return } id := uuid.New().String() diff --git a/integrations/access/jira/app.go b/integrations/access/jira/app.go index 2aab94e887f0d..c8e6c8273ec02 100644 --- a/integrations/access/jira/app.go +++ b/integrations/access/jira/app.go @@ -21,6 +21,7 @@ package jira import ( "context" "fmt" + "log/slog" "net/url" "regexp" "strings" @@ -40,6 +41,7 @@ import ( "github.com/gravitational/teleport/integrations/lib/logger" "github.com/gravitational/teleport/integrations/lib/watcherjob" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -125,7 +127,6 @@ func (a *App) run(ctx context.Context) error { var err error log := logger.Get(ctx) - log.Infof("Starting Teleport Jira Plugin") if err = a.init(ctx); err != nil { return trace.Wrap(err) @@ -164,9 +165,9 @@ func (a *App) run(ctx context.Context) error { ok := (a.webhookSrv == nil 
|| httpOk) && watcherOk a.mainJob.SetReady(ok) if ok { - log.Info("Plugin is ready") + log.InfoContext(ctx, "Plugin is ready") } else { - log.Error("Plugin is not ready") + log.ErrorContext(ctx, "Plugin is not ready") } if httpJob != nil { @@ -205,11 +206,11 @@ func (a *App) init(ctx context.Context) error { return trace.Wrap(err) } - log.Debug("Starting Jira API health check...") + log.DebugContext(ctx, "Starting Jira API health check") if err = a.jira.HealthCheck(ctx); err != nil { return trace.Wrap(err, "api health check failed") } - log.Debug("Jira API health check finished ok") + log.DebugContext(ctx, "Jira API health check finished ok") if !a.conf.DisableWebhook { webhookSrv, err := NewWebhookServer(a.conf.HTTP, a.onJiraWebhook) @@ -227,13 +228,13 @@ func (a *App) init(ctx context.Context) error { func (a *App) checkTeleportVersion(ctx context.Context) (proto.PingResponse, error) { log := logger.Get(ctx) - log.Debug("Checking Teleport server version") + log.DebugContext(ctx, "Checking Teleport server version") pong, err := a.teleport.Ping(ctx) if err != nil { if trace.IsNotImplemented(err) { return pong, trace.Wrap(err, "server version must be at least %s", minServerVersion) } - log.Error("Unable to get Teleport server version") + log.ErrorContext(ctx, "Unable to get Teleport server version") return pong, trace.Wrap(err) } err = utils.CheckMinVersion(pong.ServerVersion, minServerVersion) @@ -246,17 +247,17 @@ func (a *App) onWatcherEvent(ctx context.Context, event types.Event) error { } op := event.Type reqID := event.Resource.GetName() - ctx, _ = logger.WithField(ctx, "request_id", reqID) + ctx, _ = logger.With(ctx, "request_id", reqID) switch op { case types.OpPut: - ctx, _ = logger.WithField(ctx, "request_op", "put") + ctx, _ = logger.With(ctx, "request_op", "put") req, ok := event.Resource.(types.AccessRequest) if !ok { return trace.Errorf("unexpected resource type %T", event.Resource) } - ctx, log := logger.WithField(ctx, "request_state", 
req.GetState().String()) - log.Debug("Processing watcher event") + ctx, log := logger.With(ctx, "request_state", req.GetState().String()) + log.DebugContext(ctx, "Processing watcher event") var err error switch { @@ -265,21 +266,29 @@ func (a *App) onWatcherEvent(ctx context.Context, event types.Event) error { case req.GetState().IsResolved(): err = a.onResolvedRequest(ctx, req) default: - log.WithField("event", event).Warn("Unknown request state") + log.WarnContext(ctx, "Unknown request state", + slog.Group("event", + slog.Any("type", logutils.StringerAttr(event.Type)), + slog.Group("resource", + "kind", event.Resource.GetKind(), + "name", event.Resource.GetName(), + ), + ), + ) return nil } if err != nil { - log.WithError(err).Error("Failed to process request") + log.ErrorContext(ctx, "Failed to process request", "error", err) return trace.Wrap(err) } return nil case types.OpDelete: - ctx, log := logger.WithField(ctx, "request_op", "delete") + ctx, log := logger.With(ctx, "request_op", "delete") if err := a.onDeletedRequest(ctx, reqID); err != nil { - log.WithError(err).Errorf("Failed to process deleted request") + log.ErrorContext(ctx, "Failed to process deleted request", "error", err) return trace.Wrap(err) } return nil @@ -299,10 +308,11 @@ func (a *App) onJiraWebhook(_ context.Context, webhook Webhook) error { return nil } - ctx, log := logger.WithFields(ctx, logger.Fields{ - "jira_issue_id": webhook.Issue.ID, - }) - log.Debugf("Processing incoming webhook event %q with type %q", webhookEvent, issueEventTypeName) + ctx, log := logger.With(ctx, "jira_issue_id", webhook.Issue.ID) + log.DebugContext(ctx, "Processing incoming webhook event", + "event", webhookEvent, + "event_type", issueEventTypeName, + ) if webhook.Issue == nil { return trace.Errorf("got webhook without issue info") @@ -333,20 +343,20 @@ func (a *App) onJiraWebhook(_ context.Context, webhook Webhook) error { if statusName == "" { return trace.Errorf("getting Jira issue status: %w", err) } - 
log.Warnf("Using most recent successful getIssue response: %v", err) + log.WarnContext(ctx, "Using most recent successful getIssue response", "error", err) } - ctx, log = logger.WithFields(ctx, logger.Fields{ - "jira_issue_id": issue.ID, - "jira_issue_key": issue.Key, - }) + ctx, log = logger.With(ctx, + "jira_issue_id", issue.ID, + "jira_issue_key", issue.Key, + ) switch { case statusName == "pending": - log.Debug("Issue has pending status, ignoring it") + log.DebugContext(ctx, "Issue has pending status, ignoring it") return nil case statusName == "expired": - log.Debug("Issue has expired status, ignoring it") + log.DebugContext(ctx, "Issue has expired status, ignoring it") return nil case statusName != "approved" && statusName != "denied": return trace.BadParameter("unknown Jira status %s", statusName) @@ -357,11 +367,11 @@ func (a *App) onJiraWebhook(_ context.Context, webhook Webhook) error { return trace.Wrap(err) } if reqID == "" { - log.Debugf("Missing %q issue property", RequestIDPropertyKey) + log.DebugContext(ctx, "Missing teleportAccessRequestId issue property") return nil } - ctx, log = logger.WithField(ctx, "request_id", reqID) + ctx, log = logger.With(ctx, "request_id", reqID) reqs, err := a.teleport.GetAccessRequests(ctx, types.AccessRequestFilter{ID: reqID}) if err != nil { @@ -382,8 +392,9 @@ func (a *App) onJiraWebhook(_ context.Context, webhook Webhook) error { return trace.Errorf("plugin data is blank") } if pluginData.IssueID != issue.ID { - log.WithField("plugin_data_issue_id", pluginData.IssueID). 
- Debug("plugin_data.issue_id does not match issue.id") + log.DebugContext(ctx, "plugin_data.issue_id does not match issue.id", + "plugin_data_issue_id", pluginData.IssueID, + ) return trace.Errorf("issue_id from request's plugin_data does not match") } @@ -406,17 +417,17 @@ func (a *App) onJiraWebhook(_ context.Context, webhook Webhook) error { author, reason, err := a.loadResolutionInfo(ctx, issue, statusName) if err != nil { - log.WithError(err).Error("Failed to load resolution info from the issue history") + log.ErrorContext(ctx, "Failed to load resolution info from the issue history", "error", err) } resolution.Reason = reason - ctx, _ = logger.WithFields(ctx, logger.Fields{ - "jira_user_email": author.EmailAddress, - "jira_user_name": author.DisplayName, - "request_user": req.GetUser(), - "request_roles": req.GetRoles(), - "reason": reason, - }) + ctx, _ = logger.With(ctx, + "jira_user_email", author.EmailAddress, + "jira_user_name", author.DisplayName, + "request_user", req.GetUser(), + "request_roles", req.GetRoles(), + "reason", reason, + ) if err := a.resolveRequest(ctx, reqID, author.EmailAddress, resolution); err != nil { return trace.Wrap(err) } @@ -498,11 +509,11 @@ func (a *App) createIssue(ctx context.Context, reqID string, reqData RequestData return trace.Wrap(err) } - ctx, log := logger.WithFields(ctx, logger.Fields{ - "jira_issue_id": data.IssueID, - "jira_issue_key": data.IssueKey, - }) - log.Info("Jira Issue created") + ctx, log := logger.With(ctx, + "jira_issue_id", data.IssueID, + "jira_issue_key", data.IssueKey, + ) + log.InfoContext(ctx, "Jira Issue created") // Save jira issue info in plugin data. 
_, err = a.modifyPluginData(ctx, reqID, func(existing *PluginData) (PluginData, bool) { @@ -551,11 +562,11 @@ func (a *App) addReviewComments(ctx context.Context, reqID string, reqReviews [] } if !ok { if issueID == "" { - logger.Get(ctx).Debug("Failed to add the comment: plugin data is blank") + logger.Get(ctx).DebugContext(ctx, "Failed to add the comment: plugin data is blank") } return nil } - ctx, _ = logger.WithField(ctx, "jira_issue_id", issueID) + ctx, _ = logger.With(ctx, "jira_issue_id", issueID) slice := reqReviews[oldCount:] if len(slice) == 0 { @@ -621,7 +632,7 @@ func (a *App) resolveRequest(ctx context.Context, reqID string, userEmail string return trace.Wrap(err) } - logger.Get(ctx).Infof("Jira user %s the request", resolution.Tag) + logger.Get(ctx).InfoContext(ctx, "Jira user processed the request", "resolution", resolution.Tag) return nil } @@ -658,18 +669,18 @@ func (a *App) resolveIssue(ctx context.Context, reqID string, resolution Resolut } if !ok { if issueID == "" { - logger.Get(ctx).Debug("Failed to resolve the issue: plugin data is blank") + logger.Get(ctx).DebugContext(ctx, "Failed to resolve the issue: plugin data is blank") } // Either plugin data is missing or issue is already resolved by us, just quit. 
return nil } - ctx, log := logger.WithField(ctx, "jira_issue_id", issueID) + ctx, log := logger.With(ctx, "jira_issue_id", issueID) if err := a.jira.ResolveIssue(ctx, issueID, resolution); err != nil { return trace.Wrap(err) } - log.Info("Successfully resolved the issue") + log.InfoContext(ctx, "Successfully resolved the issue") return nil } diff --git a/integrations/access/jira/client.go b/integrations/access/jira/client.go index 2877966af663b..a23381e4d2666 100644 --- a/integrations/access/jira/client.go +++ b/integrations/access/jira/client.go @@ -125,7 +125,7 @@ func NewJiraClient(conf JiraConfig, clusterName, teleportProxyAddr string, statu defer cancel() if err := statusSink.Emit(ctx, status); err != nil { - log.WithError(err).Errorf("Error while emitting Jira plugin status: %v", err) + log.ErrorContext(ctx, "Error while emitting Jira plugin status", "error", err) } } @@ -199,7 +199,7 @@ func (j *Jira) HealthCheck(ctx context.Context) error { } } - log.Debug("Checking out Jira project...") + log.DebugContext(ctx, "Checking out Jira project") var project Project _, err = j.client.NewRequest(). SetContext(ctx). 
@@ -209,9 +209,12 @@ func (j *Jira) HealthCheck(ctx context.Context) error { if err != nil { return trace.Wrap(err) } - log.Debugf("Found project %q named %q", project.Key, project.Name) + log.DebugContext(ctx, "Found Jira project", + "project", project.Key, + "project_name", project.Name, + ) - log.Debug("Checking out Jira project permissions...") + log.DebugContext(ctx, "Checking out Jira project permissions") queryOptions, err := query.Values(GetMyPermissionsQueryOptions{ ProjectKey: j.project, Permissions: jiraRequiredPermissions, @@ -433,7 +436,7 @@ func (j *Jira) ResolveIssue(ctx context.Context, issueID string, resolution Reso if err2 := trace.Wrap(j.TransitionIssue(ctx, issue.ID, transition.ID)); err2 != nil { return trace.NewAggregate(err1, err2) } - logger.Get(ctx).Debugf("Successfully moved the issue to the status %q", toStatus) + logger.Get(ctx).DebugContext(ctx, "Successfully moved the issue to the target status", "target_status", toStatus) return trace.Wrap(err1) } @@ -457,7 +460,7 @@ func (j *Jira) AddResolutionComment(ctx context.Context, id string, resolution R SetBody(CommentInput{Body: builder.String()}). 
Post("rest/api/2/issue/{issueID}/comment") if err == nil { - logger.Get(ctx).Debug("Successfully added a resolution comment to the issue") + logger.Get(ctx).DebugContext(ctx, "Successfully added a resolution comment to the issue") } return trace.Wrap(err) } diff --git a/integrations/access/jira/cmd/teleport-jira/main.go b/integrations/access/jira/cmd/teleport-jira/main.go index b2c2bb0672d06..851de27473296 100644 --- a/integrations/access/jira/cmd/teleport-jira/main.go +++ b/integrations/access/jira/cmd/teleport-jira/main.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -72,12 +73,13 @@ func main() { if err := run(*path, *insecure, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, insecure bool, debug bool) error { + ctx := context.Background() conf, err := jira.LoadConfig(configPath) if err != nil { return trace.Wrap(err) @@ -91,7 +93,7 @@ func run(configPath string, insecure bool, debug bool) error { return err } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + slog.DebugContext(ctx, "DEBUG logging enabled") } conf.HTTP.Insecure = insecure @@ -102,8 +104,9 @@ func run(configPath string, insecure bool, debug bool) error { go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access Jira Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access Jira Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/jira/testlib/fake_jira.go b/integrations/access/jira/testlib/fake_jira.go index 1da8c432ec3a9..9696500620aba 100644 --- a/integrations/access/jira/testlib/fake_jira.go +++ b/integrations/access/jira/testlib/fake_jira.go 
@@ -30,7 +30,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/jira" ) @@ -304,6 +303,6 @@ func (s *FakeJira) CheckIssueTransition(ctx context.Context) (jira.Issue, error) func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/jira/testlib/suite.go b/integrations/access/jira/testlib/suite.go index 38341d589fa5d..c2a3d421f442c 100644 --- a/integrations/access/jira/testlib/suite.go +++ b/integrations/access/jira/testlib/suite.go @@ -721,7 +721,7 @@ func (s *JiraSuiteOSS) TestRace() { defer cancel() var lastErr error for { - logger.Get(ctx).Infof("Trying to approve issue %q", issue.Key) + logger.Get(ctx).InfoContext(ctx, "Trying to approve issue", "issue_key", issue.Key) resp, err := s.postWebhook(ctx, s.webhookURL.String(), issue.ID, "Approved") if err != nil { if lib.IsDeadline(err) { diff --git a/integrations/access/jira/webhook_server.go b/integrations/access/jira/webhook_server.go index b83e449b992c8..e9e409959b40a 100644 --- a/integrations/access/jira/webhook_server.go +++ b/integrations/access/jira/webhook_server.go @@ -105,29 +105,31 @@ func (s *WebhookServer) processWebhook(rw http.ResponseWriter, r *http.Request, defer cancel() httpRequestID := fmt.Sprintf("%v-%v", time.Now().Unix(), atomic.AddUint64(&s.counter, 1)) - ctx, log := logger.WithField(ctx, "jira_http_id", httpRequestID) + ctx, log := logger.With(ctx, "jira_http_id", httpRequestID) var webhook Webhook body, err := io.ReadAll(io.LimitReader(r.Body, jiraWebhookPayloadLimit+1)) if err != nil { - log.WithError(err).Error("Failed to read webhook payload") + log.ErrorContext(ctx, "Failed to read webhook payload", "error", err) http.Error(rw, "", http.StatusInternalServerError) return } if len(body) > jiraWebhookPayloadLimit { - 
log.Error("Received a webhook larger than %d bytes", jiraWebhookPayloadLimit) + log.ErrorContext(ctx, "Received a webhook with a payload that exceeded the limit", + "payload_size", len(body), + "payload_size_limit", jiraWebhookPayloadLimit, + ) http.Error(rw, "", http.StatusRequestEntityTooLarge) } if err = json.Unmarshal(body, &webhook); err != nil { - log.WithError(err).Error("Failed to parse webhook payload") + log.ErrorContext(ctx, "Failed to parse webhook payload", "error", err) http.Error(rw, "", http.StatusBadRequest) return } if err = s.onWebhook(ctx, webhook); err != nil { - log.WithError(err).Error("Failed to process webhook") - log.Debugf("%v", trace.DebugReport(err)) + log.ErrorContext(ctx, "Failed to process webhook", "error", err) var code int switch { case lib.IsCanceled(err) || lib.IsDeadline(err): diff --git a/integrations/access/mattermost/bot.go b/integrations/access/mattermost/bot.go index c7de9d0aaae44..edf0a7e73264d 100644 --- a/integrations/access/mattermost/bot.go +++ b/integrations/access/mattermost/bot.go @@ -150,7 +150,7 @@ func NewBot(conf Config, clusterName, webProxyAddr string) (Bot, error) { ctx, cancel := context.WithTimeout(context.Background(), mmStatusEmitTimeout) defer cancel() if err := sink.Emit(ctx, status); err != nil { - log.Errorf("Error while emitting plugin status: %v", err) + log.ErrorContext(ctx, "Error while emitting plugin status", "error", err) } }() @@ -463,14 +463,14 @@ func (b Bot) buildPostText(reqID string, reqData pd.AccessRequestData) (string, } func (b Bot) tryLookupDirectChannel(ctx context.Context, userEmail string) string { - log := logger.Get(ctx).WithField("mm_user_email", userEmail) + log := logger.Get(ctx).With("mm_user_email", userEmail) channel, err := b.LookupDirectChannel(ctx, userEmail) if err != nil { var errResult *ErrorResult if errors.As(trace.Unwrap(err), &errResult) { - log.Warningf("Failed to lookup direct channel info: %q", errResult.Message) + log.WarnContext(ctx, "Failed to lookup 
direct channel info", "error", errResult.Message) } else { - log.WithError(err).Error("Failed to lookup direct channel info") + log.ErrorContext(ctx, "Failed to lookup direct channel info", "error", err) } return "" } @@ -478,17 +478,17 @@ func (b Bot) tryLookupDirectChannel(ctx context.Context, userEmail string) strin } func (b Bot) tryLookupChannel(ctx context.Context, team, name string) string { - log := logger.Get(ctx).WithFields(logger.Fields{ - "mm_team": team, - "mm_channel": name, - }) + log := logger.Get(ctx).With( + "mm_team", team, + "mm_channel", name, + ) channel, err := b.LookupChannel(ctx, team, name) if err != nil { var errResult *ErrorResult if errors.As(trace.Unwrap(err), &errResult) { - log.Warningf("Failed to lookup channel info: %q", errResult.Message) + log.WarnContext(ctx, "Failed to lookup channel info", "error", errResult.Message) } else { - log.WithError(err).Error("Failed to lookup channel info") + log.ErrorContext(ctx, "Failed to lookup channel info", "error", err) } return "" } diff --git a/integrations/access/mattermost/cmd/teleport-mattermost/main.go b/integrations/access/mattermost/cmd/teleport-mattermost/main.go index 7c4777b26655b..0c67abb62ef86 100644 --- a/integrations/access/mattermost/cmd/teleport-mattermost/main.go +++ b/integrations/access/mattermost/cmd/teleport-mattermost/main.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -65,12 +66,13 @@ func main() { if err := run(*path, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, debug bool) error { + ctx := context.Background() conf, err := mattermost.LoadConfig(configPath) if err != nil { return trace.Wrap(err) @@ -84,14 +86,15 @@ func run(configPath string, debug bool) error { return err } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + 
slog.DebugContext(ctx, "DEBUG logging enabled") } app := mattermost.NewMattermostApp(conf) go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access Mattermost Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access Mattermost Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/mattermost/testlib/fake_mattermost.go b/integrations/access/mattermost/testlib/fake_mattermost.go index 10cc048e743bd..b2c28287c6153 100644 --- a/integrations/access/mattermost/testlib/fake_mattermost.go +++ b/integrations/access/mattermost/testlib/fake_mattermost.go @@ -31,7 +31,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/mattermost" ) @@ -387,6 +386,6 @@ func (s *FakeMattermost) CheckPostUpdate(ctx context.Context) (mattermost.Post, func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/msteams/app.go b/integrations/access/msteams/app.go index 306be091ca8b0..b18c96ba3f4a3 100644 --- a/integrations/access/msteams/app.go +++ b/integrations/access/msteams/app.go @@ -62,14 +62,9 @@ type App struct { // NewApp initializes a new teleport-msteams app and returns it. 
func NewApp(conf Config) (*App, error) { - log, err := conf.Log.NewSLogLogger() - if err != nil { - return nil, trace.Wrap(err) - } - app := &App{ conf: conf, - log: log.With("plugin", pluginName), + log: slog.With("plugin", pluginName), } app.mainJob = lib.NewServiceJob(app.run) diff --git a/integrations/access/msteams/bot.go b/integrations/access/msteams/bot.go index c0598c1f4d24f..4292f856dba90 100644 --- a/integrations/access/msteams/bot.go +++ b/integrations/access/msteams/bot.go @@ -30,7 +30,6 @@ import ( "github.com/gravitational/teleport/integrations/access/common" "github.com/gravitational/teleport/integrations/access/msteams/msapi" "github.com/gravitational/teleport/integrations/lib" - "github.com/gravitational/teleport/integrations/lib/logger" "github.com/gravitational/teleport/integrations/lib/plugindata" ) @@ -469,7 +468,7 @@ func (b *Bot) CheckHealth(ctx context.Context) error { Code: status, ErrorMessage: message, }); err != nil { - logger.Get(ctx).Errorf("Error while emitting ms teams plugin status: %v", err) + b.log.ErrorContext(ctx, "Error while emitting ms teams plugin status", "error", err) } } return trace.Wrap(err) diff --git a/integrations/access/msteams/cmd/teleport-msteams/main.go b/integrations/access/msteams/cmd/teleport-msteams/main.go index 970df1ac98db4..75e66a46b7cf7 100644 --- a/integrations/access/msteams/cmd/teleport-msteams/main.go +++ b/integrations/access/msteams/cmd/teleport-msteams/main.go @@ -16,6 +16,7 @@ package main import ( "context" + "log/slog" "os" "time" @@ -99,7 +100,7 @@ func main() { if err := run(*startConfigPath, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } diff --git a/integrations/access/msteams/testlib/fake_msteams.go b/integrations/access/msteams/testlib/fake_msteams.go index ceb1a3edc2d41..f3e4d4c5550c2 100644 --- a/integrations/access/msteams/testlib/fake_msteams.go +++ 
b/integrations/access/msteams/testlib/fake_msteams.go @@ -30,7 +30,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/msteams/msapi" ) @@ -326,6 +325,6 @@ func (s *FakeTeams) CheckMessageUpdate(ctx context.Context) (Msg, error) { func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/msteams/uninstall.go b/integrations/access/msteams/uninstall.go index e60a9ce0c8ddd..22aa9e6961ab1 100644 --- a/integrations/access/msteams/uninstall.go +++ b/integrations/access/msteams/uninstall.go @@ -18,7 +18,8 @@ import ( "context" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" + + "github.com/gravitational/teleport/integrations/lib/logger" ) func Uninstall(ctx context.Context, configPath string) error { @@ -26,11 +27,13 @@ func Uninstall(ctx context.Context, configPath string) error { if err != nil { return trace.Wrap(err) } - err = checkApp(ctx, b) - if err != nil { + + if err := checkApp(ctx, b); err != nil { return trace.Wrap(err) } + log := logger.Get(ctx) + var errs []error for _, recipient := range c.Recipients.GetAllRawRecipients() { _, isChannel := b.checkChannelURL(recipient) @@ -38,11 +41,11 @@ func Uninstall(ctx context.Context, configPath string) error { errs = append(errs, b.UninstallAppForUser(ctx, recipient)) } } - err = trace.NewAggregate(errs...) - if err != nil { - log.Errorln("The following error(s) happened when uninstalling the Teams App:") + + if trace.NewAggregate(errs...) 
!= nil { + log.ErrorContext(ctx, "Encountered error(s) when uninstalling the Teams App", "error", err) return err } - log.Info("Successfully uninstalled app for all recipients") + log.InfoContext(ctx, "Successfully uninstalled app for all recipients") return nil } diff --git a/integrations/access/msteams/validate.go b/integrations/access/msteams/validate.go index 61d9d25f635e8..7969d7edebe0d 100644 --- a/integrations/access/msteams/validate.go +++ b/integrations/access/msteams/validate.go @@ -17,6 +17,7 @@ package msteams import ( "context" "fmt" + "log/slog" "time" cards "github.com/DanielTitkov/go-adaptive-cards" @@ -142,11 +143,7 @@ func loadConfig(configPath string) (*Bot, *Config, error) { fmt.Printf(" - Checking application %v status...\n", c.MSAPI.TeamsAppID) - log, err := c.Log.NewSLogLogger() - if err != nil { - return nil, nil, trace.Wrap(err) - } - b, err := NewBot(c, "local", "", log) + b, err := NewBot(c, "local", "", slog.Default()) if err != nil { return nil, nil, trace.Wrap(err) } diff --git a/integrations/access/opsgenie/app.go b/integrations/access/opsgenie/app.go index 132389ad5b5a3..60950f31fa4b1 100644 --- a/integrations/access/opsgenie/app.go +++ b/integrations/access/opsgenie/app.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "time" @@ -39,6 +40,7 @@ import ( "github.com/gravitational/teleport/integrations/lib/logger" "github.com/gravitational/teleport/integrations/lib/watcherjob" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -115,7 +117,7 @@ func (a *App) run(ctx context.Context) error { var err error log := logger.Get(ctx) - log.Infof("Starting Teleport Access Opsgenie Plugin") + log.InfoContext(ctx, "Starting Teleport Access Opsgenie Plugin") if err = a.init(ctx); err != nil { return trace.Wrap(err) @@ -147,9 +149,9 @@ func (a *App) run(ctx context.Context) error { a.mainJob.SetReady(ok) if ok { - log.Info("Plugin is ready") + 
log.InfoContext(ctx, "Plugin is ready") } else { - log.Error("Plugin is not ready") + log.ErrorContext(ctx, "Plugin is not ready") } <-watcherJob.Done() @@ -177,24 +179,24 @@ func (a *App) init(ctx context.Context) error { } log := logger.Get(ctx) - log.Debug("Starting API health check...") + log.DebugContext(ctx, "Starting API health check") if err = a.opsgenie.CheckHealth(ctx); err != nil { return trace.Wrap(err, "API health check failed") } - log.Debug("API health check finished ok") + log.DebugContext(ctx, "API health check finished ok") return nil } func (a *App) checkTeleportVersion(ctx context.Context) (proto.PingResponse, error) { log := logger.Get(ctx) - log.Debug("Checking Teleport server version") + log.DebugContext(ctx, "Checking Teleport server version") pong, err := a.teleport.Ping(ctx) if err != nil { if trace.IsNotImplemented(err) { return pong, trace.Wrap(err, "server version must be at least %s", minServerVersion) } - log.Error("Unable to get Teleport server version") + log.ErrorContext(ctx, "Unable to get Teleport server version") return pong, trace.Wrap(err) } err = utils.CheckMinVersion(pong.ServerVersion, minServerVersion) @@ -219,16 +221,16 @@ func (a *App) handleAcessRequest(ctx context.Context, event types.Event) error { } op := event.Type reqID := event.Resource.GetName() - ctx, _ = logger.WithField(ctx, "request_id", reqID) + ctx, _ = logger.With(ctx, "request_id", reqID) switch op { case types.OpPut: - ctx, _ = logger.WithField(ctx, "request_op", "put") + ctx, _ = logger.With(ctx, "request_op", "put") req, ok := event.Resource.(types.AccessRequest) if !ok { return trace.Errorf("unexpected resource type %T", event.Resource) } - ctx, log := logger.WithField(ctx, "request_state", req.GetState().String()) + ctx, log := logger.With(ctx, "request_state", req.GetState().String()) var err error switch { @@ -237,21 +239,29 @@ func (a *App) handleAcessRequest(ctx context.Context, event types.Event) error { case req.GetState().IsResolved(): err = 
a.onResolvedRequest(ctx, req) default: - log.WithField("event", event).Warn("Unknown request state") + log.WarnContext(ctx, "Unknown request state", + slog.Group("event", + slog.Any("type", logutils.StringerAttr(event.Type)), + slog.Group("resource", + "kind", event.Resource.GetKind(), + "name", event.Resource.GetName(), + ), + ), + ) return nil } if err != nil { - log.WithError(err).Error("Failed to process request") + log.ErrorContext(ctx, "Failed to process request", "error", err) return trace.Wrap(err) } return nil case types.OpDelete: - ctx, log := logger.WithField(ctx, "request_op", "delete") + ctx, log := logger.With(ctx, "request_op", "delete") if err := a.onDeletedRequest(ctx, reqID); err != nil { - log.WithError(err).Error("Failed to process deleted request") + log.ErrorContext(ctx, "Failed to process deleted request", "error", err) return trace.Wrap(err) } return nil @@ -310,13 +320,13 @@ func (a *App) getNotifySchedulesAndTeams(ctx context.Context, req types.AccessRe scheduleAnnotationKey := types.TeleportNamespace + types.ReqAnnotationNotifySchedulesLabel schedules, err = common.GetNamesFromAnnotations(req, scheduleAnnotationKey) if err != nil { - log.Debugf("No schedules to notify in %s", scheduleAnnotationKey) + log.DebugContext(ctx, "No schedules to notify", "schedule", scheduleAnnotationKey) } teamAnnotationKey := types.TeleportNamespace + types.ReqAnnotationTeamsLabel teams, err = common.GetNamesFromAnnotations(req, teamAnnotationKey) if err != nil { - log.Debugf("No teams to notify in %s", teamAnnotationKey) + log.DebugContext(ctx, "No teams to notify", "teams", teamAnnotationKey) } if len(schedules) == 0 && len(teams) == 0 { @@ -336,7 +346,7 @@ func (a *App) tryNotifyService(ctx context.Context, req types.AccessRequest) (bo recipientSchedules, recipientTeams, err := a.getMessageRecipients(ctx, req) if err != nil { - log.Debugf("Skipping the notification: %s", err) + log.DebugContext(ctx, "Skipping notification", "error", err) return false, 
trace.Wrap(errMissingAnnotation) } @@ -434,8 +444,8 @@ func (a *App) createAlert(ctx context.Context, reqID string, reqData RequestData if err != nil { return trace.Wrap(err) } - ctx, log := logger.WithField(ctx, "opsgenie_alert_id", data.AlertID) - log.Info("Successfully created Opsgenie alert") + ctx, log := logger.With(ctx, "opsgenie_alert_id", data.AlertID) + log.InfoContext(ctx, "Successfully created Opsgenie alert") // Save opsgenie alert info in plugin data. _, err = a.modifyPluginData(ctx, reqID, func(existing *PluginData) (PluginData, bool) { @@ -479,10 +489,10 @@ func (a *App) postReviewNotes(ctx context.Context, reqID string, reqReviews []ty return trace.Wrap(err) } if !ok { - logger.Get(ctx).Debug("Failed to post the note: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to post the note: plugin data is missing") return nil } - ctx, _ = logger.WithField(ctx, "opsgenie_alert_id", data.AlertID) + ctx, _ = logger.With(ctx, "opsgenie_alert_id", data.AlertID) slice := reqReviews[oldCount:] if len(slice) == 0 { @@ -504,7 +514,7 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er serviceNames, err := a.getOnCallServiceNames(req) if err != nil { - logger.Get(ctx).Debugf("Skipping the approval: %s", err) + logger.Get(ctx).DebugContext(ctx, "Skipping approval", "error", err) return nil } @@ -537,14 +547,14 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er }, }); err != nil { if strings.HasSuffix(err.Error(), "has already reviewed this request") { - log.Debug("Already reviewed the request") + log.DebugContext(ctx, "Already reviewed the request") return nil } return trace.Wrap(err, "submitting access request") } } - log.Info("Successfully submitted a request approval") + log.InfoContext(ctx, "Successfully submitted a request approval") return nil } @@ -576,15 +586,15 @@ func (a *App) resolveAlert(ctx context.Context, reqID string, resolution Resolut return trace.Wrap(err) } if !ok 
{ - logger.Get(ctx).Debug("Failed to resolve the alert: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to resolve the alert: plugin data is missing") return nil } - ctx, log := logger.WithField(ctx, "opsgenie_alert_id", alertID) + ctx, log := logger.With(ctx, "opsgenie_alert_id", alertID) if err := a.opsgenie.ResolveAlert(ctx, alertID, resolution); err != nil { return trace.Wrap(err) } - log.Info("Successfully resolved the alert") + log.InfoContext(ctx, "Successfully resolved the alert") return nil } diff --git a/integrations/access/opsgenie/client.go b/integrations/access/opsgenie/client.go index 2619c6ed6f7a9..2c8cdaec09a33 100644 --- a/integrations/access/opsgenie/client.go +++ b/integrations/access/opsgenie/client.go @@ -185,10 +185,10 @@ func (og Client) tryGetAlertRequestResult(ctx context.Context, reqID string) (Ge for { alertRequestResult, err := og.getAlertRequestResult(ctx, reqID) if err == nil { - logger.Get(ctx).Debugf("Got alert request result: %+v", alertRequestResult) + logger.Get(ctx).DebugContext(ctx, "Got alert request result", "alert_id", alertRequestResult.Data.AlertID) return alertRequestResult, nil } - logger.Get(ctx).Debug("Failed to get alert request result:", err) + logger.Get(ctx).DebugContext(ctx, "Failed to get alert request result", "error", err) if err := backoff.Do(ctx); err != nil { return GetAlertRequestResult{}, trace.Wrap(err) } @@ -344,8 +344,10 @@ func (og Client) CheckHealth(ctx context.Context) error { code = types.PluginStatusCode_OTHER_ERROR } if err := og.StatusSink.Emit(ctx, &types.PluginStatusV1{Code: code}); err != nil { - logger.Get(resp.Request.Context()).WithError(err). 
- WithField("code", resp.StatusCode()).Errorf("Error while emitting servicenow plugin status: %v", err) + logger.Get(resp.Request.Context()).ErrorContext(ctx, "Error while emitting servicenow plugin status", + "error", err, + "code", resp.StatusCode(), + ) } } diff --git a/integrations/access/opsgenie/testlib/fake_opsgenie.go b/integrations/access/opsgenie/testlib/fake_opsgenie.go index 9b5e6252119d1..1c124e19a75fc 100644 --- a/integrations/access/opsgenie/testlib/fake_opsgenie.go +++ b/integrations/access/opsgenie/testlib/fake_opsgenie.go @@ -32,7 +32,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/integrations/access/opsgenie" @@ -314,7 +313,7 @@ func (s *FakeOpsgenie) GetSchedule(scheduleName string) ([]opsgenie.Responder, b func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/pagerduty/app.go b/integrations/access/pagerduty/app.go index 5eadcc5147cd0..2351c5d2d5f02 100644 --- a/integrations/access/pagerduty/app.go +++ b/integrations/access/pagerduty/app.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "time" @@ -38,6 +39,7 @@ import ( "github.com/gravitational/teleport/integrations/lib/logger" "github.com/gravitational/teleport/integrations/lib/watcherjob" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -106,7 +108,6 @@ func (a *App) run(ctx context.Context) error { var err error log := logger.Get(ctx) - log.Infof("Starting Teleport Access PagerDuty Plugin") if err = a.init(ctx); err != nil { return trace.Wrap(err) @@ -146,9 +147,9 @@ func (a *App) run(ctx context.Context) error { a.mainJob.SetReady(ok) if ok { - log.Info("Plugin is ready") + log.InfoContext(ctx, "Plugin is 
ready") } else { - log.Error("Plugin is not ready") + log.ErrorContext(ctx, "Plugin is not ready") } <-watcherJob.Done() @@ -202,25 +203,25 @@ func (a *App) init(ctx context.Context) error { return trace.Wrap(err) } - log.Debug("Starting PagerDuty API health check...") + log.DebugContext(ctx, "Starting PagerDuty API health check") if err = a.pagerduty.HealthCheck(ctx); err != nil { return trace.Wrap(err, "api health check failed. check your credentials and service_id settings") } - log.Debug("PagerDuty API health check finished ok") + log.DebugContext(ctx, "PagerDuty API health check finished ok") return nil } func (a *App) checkTeleportVersion(ctx context.Context) (proto.PingResponse, error) { log := logger.Get(ctx) - log.Debug("Checking Teleport server version") + log.DebugContext(ctx, "Checking Teleport server version") pong, err := a.teleport.Ping(ctx) if err != nil { if trace.IsNotImplemented(err) { return pong, trace.Wrap(err, "server version must be at least %s", minServerVersion) } - log.Error("Unable to get Teleport server version") + log.ErrorContext(ctx, "Unable to get Teleport server version") return pong, trace.Wrap(err) } err = utils.CheckMinVersion(pong.ServerVersion, minServerVersion) @@ -245,16 +246,16 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error } op := event.Type reqID := event.Resource.GetName() - ctx, _ = logger.WithField(ctx, "request_id", reqID) + ctx, _ = logger.With(ctx, "request_id", reqID) switch op { case types.OpPut: - ctx, _ = logger.WithField(ctx, "request_op", "put") + ctx, _ = logger.With(ctx, "request_op", "put") req, ok := event.Resource.(types.AccessRequest) if !ok { return trace.Errorf("unexpected resource type %T", event.Resource) } - ctx, log := logger.WithField(ctx, "request_state", req.GetState().String()) + ctx, log := logger.With(ctx, "request_state", req.GetState().String()) var err error switch { @@ -263,21 +264,29 @@ func (a *App) handleAccessRequest(ctx context.Context, event 
types.Event) error case req.GetState().IsResolved(): err = a.onResolvedRequest(ctx, req) default: - log.WithField("event", event).Warn("Unknown request state") + log.WarnContext(ctx, "Unknown request state", + slog.Group("event", + slog.Any("type", logutils.StringerAttr(event.Type)), + slog.Group("resource", + "kind", event.Resource.GetKind(), + "name", event.Resource.GetName(), + ), + ), + ) return nil } if err != nil { - log.WithError(err).Error("Failed to process request") + log.ErrorContext(ctx, "Failed to process request", "error", err) return trace.Wrap(err) } return nil case types.OpDelete: - ctx, log := logger.WithField(ctx, "request_op", "delete") + ctx, log := logger.With(ctx, "request_op", "delete") if err := a.onDeletedRequest(ctx, reqID); err != nil { - log.WithError(err).Error("Failed to process deleted request") + log.ErrorContext(ctx, "Failed to process deleted request", "error", err) return trace.Wrap(err) } return nil @@ -288,7 +297,7 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error func (a *App) onPendingRequest(ctx context.Context, req types.AccessRequest) error { if len(req.GetSystemAnnotations()) == 0 { - logger.Get(ctx).Debug("Cannot proceed further. 
Request is missing any annotations") + logger.Get(ctx).DebugContext(ctx, "Cannot proceed further - request is missing any annotations") return nil } @@ -370,11 +379,11 @@ func (a *App) tryNotifyService(ctx context.Context, req types.AccessRequest) (bo serviceName, err := a.getNotifyServiceName(ctx, req) if err != nil { - log.Debugf("Skipping the notification: %s", err) + log.DebugContext(ctx, "Skipping the notification", "error", err) return false, trace.Wrap(errSkip) } - ctx, _ = logger.WithField(ctx, "pd_service_name", serviceName) + ctx, _ = logger.With(ctx, "pd_service_name", serviceName) service, err := a.pagerduty.FindServiceByName(ctx, serviceName) if err != nil { return false, trace.Wrap(err, "finding pagerduty service %s", serviceName) @@ -420,8 +429,8 @@ func (a *App) createIncident(ctx context.Context, serviceID, reqID string, reqDa if err != nil { return trace.Wrap(err) } - ctx, log := logger.WithField(ctx, "pd_incident_id", data.IncidentID) - log.Info("Successfully created PagerDuty incident") + ctx, log := logger.With(ctx, "pd_incident_id", data.IncidentID) + log.InfoContext(ctx, "Successfully created PagerDuty incident") // Save pagerduty incident info in plugin data. 
_, err = a.modifyPluginData(ctx, reqID, func(existing *PluginData) (PluginData, bool) { @@ -465,10 +474,10 @@ func (a *App) postReviewNotes(ctx context.Context, reqID string, reqReviews []ty return trace.Wrap(err) } if !ok { - logger.Get(ctx).Debug("Failed to post the note: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to post the note: plugin data is missing") return nil } - ctx, _ = logger.WithField(ctx, "pd_incident_id", data.IncidentID) + ctx, _ = logger.With(ctx, "pd_incident_id", data.IncidentID) slice := reqReviews[oldCount:] if len(slice) == 0 { @@ -490,36 +499,40 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er serviceNames, err := a.getOnCallServiceNames(req) if err != nil { - logger.Get(ctx).Debugf("Skipping the approval: %s", err) + logger.Get(ctx).DebugContext(ctx, "Skipping approval", "error", err) return nil } userName := req.GetUser() if !lib.IsEmail(userName) { - logger.Get(ctx).Warningf("Skipping the approval: %q does not look like a valid email", userName) + logger.Get(ctx).WarnContext(ctx, "Skipping approval, found invalid email", "pd_user_email", userName) return nil } user, err := a.pagerduty.FindUserByEmail(ctx, userName) if err != nil { if trace.IsNotFound(err) { - log.WithError(err).WithField("pd_user_email", userName).Debug("Skipping the approval: email is not found") + log.DebugContext(ctx, "Skipping approval, email is not found", + "error", err, + "pd_user_email", userName) return nil } return trace.Wrap(err) } - ctx, log = logger.WithFields(ctx, logger.Fields{ - "pd_user_email": user.Email, - "pd_user_name": user.Name, - }) + ctx, log = logger.With(ctx, + "pd_user_email", user.Email, + "pd_user_name", user.Name, + ) services, err := a.pagerduty.FindServicesByNames(ctx, serviceNames) if err != nil { return trace.Wrap(err) } if len(services) == 0 { - log.WithField("pd_service_names", serviceNames).Warning("Failed to find any service") + log.WarnContext(ctx, "Failed to find any 
service", + "pd_service_names", serviceNames, + ) return nil } @@ -536,7 +549,7 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er return trace.Wrap(err) } if len(escalationPolicyIDs) == 0 { - log.Debug("Skipping the approval: user is not on call") + log.DebugContext(ctx, "Skipping the approval: user is not on call") return nil } @@ -561,13 +574,13 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er }, }); err != nil { if strings.HasSuffix(err.Error(), "has already reviewed this request") { - log.Debug("Already reviewed the request") + log.DebugContext(ctx, "Already reviewed the request") return nil } return trace.Wrap(err, "submitting access request") } - log.Info("Successfully submitted a request approval") + log.InfoContext(ctx, "Successfully submitted a request approval") return nil } @@ -599,15 +612,15 @@ func (a *App) resolveIncident(ctx context.Context, reqID string, resolution Reso return trace.Wrap(err) } if !ok { - logger.Get(ctx).Debug("Failed to resolve the incident: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to resolve the incident: plugin data is missing") return nil } - ctx, log := logger.WithField(ctx, "pd_incident_id", incidentID) + ctx, log := logger.With(ctx, "pd_incident_id", incidentID) if err := a.pagerduty.ResolveIncident(ctx, incidentID, resolution); err != nil { return trace.Wrap(err) } - log.Info("Successfully resolved the incident") + log.InfoContext(ctx, "Successfully resolved the incident") return nil } diff --git a/integrations/access/pagerduty/client.go b/integrations/access/pagerduty/client.go index 51adfb38f5aed..fd42876a154ca 100644 --- a/integrations/access/pagerduty/client.go +++ b/integrations/access/pagerduty/client.go @@ -122,7 +122,7 @@ func onAfterPagerDutyResponse(sink common.StatusSink) resty.ResponseMiddleware { defer cancel() if err := sink.Emit(ctx, status); err != nil { - log.WithError(err).Errorf("Error while emitting PagerDuty 
plugin status: %v", err) + log.ErrorContext(ctx, "Error while emitting PagerDuty plugin status", "error", err) } if resp.IsError() { @@ -288,7 +288,7 @@ func (p *Pagerduty) FindUserByEmail(ctx context.Context, userEmail string) (User } if len(result.Users) > 0 && result.More { - logger.Get(ctx).Warningf("PagerDuty returned too many results when querying by email %q", userEmail) + logger.Get(ctx).WarnContext(ctx, "PagerDuty returned too many results when querying user email", "email", userEmail) } return User{}, trace.NotFound("failed to find pagerduty user by email %s", userEmail) @@ -387,10 +387,10 @@ func (p *Pagerduty) FilterOnCallPolicies(ctx context.Context, userID string, esc if len(filteredIDSet) == 0 { if anyData { - logger.Get(ctx).WithFields(logger.Fields{ - "pd_user_id": userID, - "pd_escalation_policy_ids": escalationPolicyIDs, - }).Warningf("PagerDuty returned some oncalls array but none of them matched the query") + logger.Get(ctx).WarnContext(ctx, "PagerDuty returned some oncalls array but none of them matched the query", + "pd_user_id", userID, + "pd_escalation_policy_ids", escalationPolicyIDs, + ) } return nil, nil diff --git a/integrations/access/pagerduty/cmd/teleport-pagerduty/main.go b/integrations/access/pagerduty/cmd/teleport-pagerduty/main.go index aa4a8ba96eb32..58cfa27248d56 100644 --- a/integrations/access/pagerduty/cmd/teleport-pagerduty/main.go +++ b/integrations/access/pagerduty/cmd/teleport-pagerduty/main.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -65,12 +66,13 @@ func main() { if err := run(*path, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, debug bool) error { + ctx := context.Background() conf, err := pagerduty.LoadConfig(configPath) if err != nil { return trace.Wrap(err) @@ -84,7 +86,7 @@ func run(configPath 
string, debug bool) error { return err } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + slog.DebugContext(ctx, "DEBUG logging enabled") } app, err := pagerduty.NewApp(*conf) @@ -94,8 +96,9 @@ func run(configPath string, debug bool) error { go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access PagerDuty Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access PagerDuty Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/pagerduty/testlib/fake_pagerduty.go b/integrations/access/pagerduty/testlib/fake_pagerduty.go index 18a2a6ae24361..eee358f022458 100644 --- a/integrations/access/pagerduty/testlib/fake_pagerduty.go +++ b/integrations/access/pagerduty/testlib/fake_pagerduty.go @@ -32,7 +32,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/pagerduty" "github.com/gravitational/teleport/integrations/lib/stringset" @@ -565,6 +564,6 @@ func (s *FakePagerduty) CheckNewIncidentNote(ctx context.Context) (FakeIncidentN func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/servicenow/app.go b/integrations/access/servicenow/app.go index 3d56f4fc97a8b..07248b488d872 100644 --- a/integrations/access/servicenow/app.go +++ b/integrations/access/servicenow/app.go @@ -21,6 +21,7 @@ package servicenow import ( "context" "fmt" + "log/slog" "net/url" "slices" "strings" @@ -41,6 +42,7 @@ import ( "github.com/gravitational/teleport/integrations/lib/logger" "github.com/gravitational/teleport/integrations/lib/watcherjob" "github.com/gravitational/teleport/lib/utils" + logutils 
"github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -116,7 +118,7 @@ func (a *App) WaitReady(ctx context.Context) (bool, error) { func (a *App) run(ctx context.Context) error { log := logger.Get(ctx) - log.Infof("Starting Teleport Access Servicenow Plugin") + log.InfoContext(ctx, "Starting Teleport Access Servicenow Plugin") if err := a.init(ctx); err != nil { return trace.Wrap(err) @@ -153,9 +155,9 @@ func (a *App) run(ctx context.Context) error { } a.mainJob.SetReady(ok) if ok { - log.Info("ServiceNow plugin is ready") + log.InfoContext(ctx, "ServiceNow plugin is ready") } else { - log.Error("ServiceNow plugin is not ready") + log.ErrorContext(ctx, "ServiceNow plugin is not ready") } <-watcherJob.Done() @@ -190,25 +192,25 @@ func (a *App) init(ctx context.Context) error { return trace.Wrap(err) } - log.Debug("Starting API health check...") + log.DebugContext(ctx, "Starting API health check") if err = a.serviceNow.CheckHealth(ctx); err != nil { return trace.Wrap(err, "API health check failed") } - log.Debug("API health check finished ok") + log.DebugContext(ctx, "API health check finished ok") return nil } func (a *App) checkTeleportVersion(ctx context.Context) (proto.PingResponse, error) { log := logger.Get(ctx) - log.Debug("Checking Teleport server version") + log.DebugContext(ctx, "Checking Teleport server version") pong, err := a.teleport.Ping(ctx) if err != nil { if trace.IsNotImplemented(err) { return pong, trace.Wrap(err, "server version must be at least %s", minServerVersion) } - log.Error("Unable to get Teleport server version") + log.ErrorContext(ctx, "Unable to get Teleport server version") return pong, trace.Wrap(err) } err = utils.CheckMinVersion(pong.ServerVersion, minServerVersion) @@ -233,16 +235,16 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error } op := event.Type reqID := event.Resource.GetName() - ctx, _ = logger.WithField(ctx, "request_id", reqID) + ctx, _ = logger.With(ctx, "request_id", reqID) 
switch op { case types.OpPut: - ctx, _ = logger.WithField(ctx, "request_op", "put") + ctx, _ = logger.With(ctx, "request_op", "put") req, ok := event.Resource.(types.AccessRequest) if !ok { return trace.Errorf("unexpected resource type %T", event.Resource) } - ctx, log := logger.WithField(ctx, "request_state", req.GetState().String()) + ctx, log := logger.With(ctx, "request_state", req.GetState().String()) var err error switch { @@ -251,21 +253,29 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error case req.GetState().IsResolved(): err = a.onResolvedRequest(ctx, req) default: - log.WithField("event", event).Warnf("Unknown request state: %q", req.GetState()) + log.WarnContext(ctx, "Unknown request state", + slog.Group("event", + slog.Any("type", logutils.StringerAttr(event.Type)), + slog.Group("resource", + "kind", event.Resource.GetKind(), + "name", event.Resource.GetName(), + ), + ), + ) return nil } if err != nil { - log.WithError(err).Error("Failed to process request") + log.ErrorContext(ctx, "Failed to process request", "error", err) return trace.Wrap(err) } return nil case types.OpDelete: - ctx, log := logger.WithField(ctx, "request_op", "delete") + ctx, log := logger.With(ctx, "request_op", "delete") if err := a.onDeletedRequest(ctx, reqID); err != nil { - log.WithError(err).Error("Failed to process deleted request") + log.ErrorContext(ctx, "Failed to process deleted request", "error", err) return trace.Wrap(err) } return nil @@ -276,7 +286,7 @@ func (a *App) handleAccessRequest(ctx context.Context, event types.Event) error func (a *App) onPendingRequest(ctx context.Context, req types.AccessRequest) error { reqID := req.GetName() - log := logger.Get(ctx).WithField("reqId", reqID) + log := logger.Get(ctx).With("req_id", reqID) resourceNames, err := a.getResourceNames(ctx, req) if err != nil { @@ -303,7 +313,7 @@ func (a *App) onPendingRequest(ctx context.Context, req types.AccessRequest) err } if isNew { - log.Infof("Creating 
servicenow incident") + log.InfoContext(ctx, "Creating servicenow incident") recipientAssignee := a.accessMonitoringRules.RecipientsFromAccessMonitoringRules(ctx, req) assignees := []string{} recipientAssignee.ForEach(func(r common.Recipient) { @@ -375,8 +385,8 @@ func (a *App) createIncident(ctx context.Context, reqID string, reqData RequestD if err != nil { return trace.Wrap(err) } - ctx, log := logger.WithField(ctx, "servicenow_incident_id", data.IncidentID) - log.Info("Successfully created Servicenow incident") + ctx, log := logger.With(ctx, "servicenow_incident_id", data.IncidentID) + log.InfoContext(ctx, "Successfully created Servicenow incident") // Save servicenow incident info in plugin data. _, err = a.modifyPluginData(ctx, reqID, func(existing *PluginData) (PluginData, bool) { @@ -420,10 +430,10 @@ func (a *App) postReviewNotes(ctx context.Context, reqID string, reqReviews []ty return trace.Wrap(err) } if !ok { - logger.Get(ctx).Debug("Failed to post the note: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to post the note: plugin data is missing") return nil } - ctx, _ = logger.WithField(ctx, "servicenow_incident_id", data.IncidentID) + ctx, _ = logger.With(ctx, "servicenow_incident_id", data.IncidentID) slice := reqReviews[oldCount:] if len(slice) == 0 { @@ -445,22 +455,28 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er serviceNames, err := a.getOnCallServiceNames(req) if err != nil { - logger.Get(ctx).Debugf("Skipping the approval: %s", err) + logger.Get(ctx).DebugContext(ctx, "Skipping the approval", "error", err) return nil } - log.Debugf("Checking the following shifts to see if the requester is on-call: %s", serviceNames) + log.DebugContext(ctx, "Checking the shifts to see if the requester is on-call", "shifts", serviceNames) onCallUsers, err := a.getOnCallUsers(ctx, serviceNames) if err != nil { return trace.Wrap(err) } - log.Debugf("Users on-call are: %s", onCallUsers) + 
log.DebugContext(ctx, "Users on-call are", "on_call_users", onCallUsers) if userIsOnCall := slices.Contains(onCallUsers, req.GetUser()); !userIsOnCall { - log.Debugf("User %q is not on-call, not approving the request %q.", req.GetUser(), req.GetName()) + log.DebugContext(ctx, "User is not on-call, not approving the request", + "user", req.GetUser(), + "request", req.GetName(), + ) return nil } - log.Debugf("User %q is on-call. Auto-approving the request %q.", req.GetUser(), req.GetName()) + log.DebugContext(ctx, "User is on-call, auto-approving the request", + "user", req.GetUser(), + "request", req.GetName(), + ) if _, err := a.teleport.SubmitAccessReview(ctx, types.AccessReviewSubmission{ RequestID: req.GetName(), Review: types.AccessReview{ @@ -474,12 +490,12 @@ func (a *App) tryApproveRequest(ctx context.Context, req types.AccessRequest) er }, }); err != nil { if strings.HasSuffix(err.Error(), "has already reviewed this request") { - log.Debug("Already reviewed the request") + log.DebugContext(ctx, "Already reviewed the request") return nil } return trace.Wrap(err, "submitting access request") } - log.Info("Successfully submitted a request approval") + log.InfoContext(ctx, "Successfully submitted a request approval") return nil } @@ -490,7 +506,7 @@ func (a *App) getOnCallUsers(ctx context.Context, serviceNames []string) ([]stri respondersResult, err := a.serviceNow.GetOnCall(ctx, scheduleName) if err != nil { if trace.IsNotFound(err) { - log.WithError(err).Error("Failed to retrieve responder from schedule") + log.ErrorContext(ctx, "Failed to retrieve responder from schedule", "error", err) continue } return nil, trace.Wrap(err) @@ -528,15 +544,15 @@ func (a *App) resolveIncident(ctx context.Context, reqID string, resolution Reso return trace.Wrap(err) } if !ok { - logger.Get(ctx).Debug("Failed to resolve the incident: plugin data is missing") + logger.Get(ctx).DebugContext(ctx, "Failed to resolve the incident: plugin data is missing") return nil } - ctx, log 
:= logger.WithField(ctx, "servicenow_incident_id", incidentID) + ctx, log := logger.With(ctx, "servicenow_incident_id", incidentID) if err := a.serviceNow.ResolveIncident(ctx, incidentID, resolution); err != nil { return trace.Wrap(err) } - log.Info("Successfully resolved the incident") + log.InfoContext(ctx, "Successfully resolved the incident") return nil } diff --git a/integrations/access/servicenow/client.go b/integrations/access/servicenow/client.go index 8d0fb4f62b9de..8c306c1efa4ee 100644 --- a/integrations/access/servicenow/client.go +++ b/integrations/access/servicenow/client.go @@ -287,7 +287,10 @@ func (snc *Client) CheckHealth(ctx context.Context) error { } if err := snc.StatusSink.Emit(ctx, &types.PluginStatusV1{Code: code}); err != nil { log := logger.Get(resp.Request.Context()) - log.WithError(err).WithField("code", resp.StatusCode()).Errorf("Error while emitting servicenow plugin status: %v", err) + log.ErrorContext(ctx, "Error while emitting servicenow plugin status", + "error", err, + "code", resp.StatusCode(), + ) } } diff --git a/integrations/access/servicenow/testlib/fake_servicenow.go b/integrations/access/servicenow/testlib/fake_servicenow.go index 3b2d70e82a9b2..edf3fdced5fe7 100644 --- a/integrations/access/servicenow/testlib/fake_servicenow.go +++ b/integrations/access/servicenow/testlib/fake_servicenow.go @@ -32,7 +32,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/servicenow" "github.com/gravitational/teleport/integrations/lib/stringset" @@ -284,6 +283,6 @@ func (s *FakeServiceNow) getOnCall(rotationName string) []string { func panicIf(err error) { if err != nil { - log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/access/slack/bot.go b/integrations/access/slack/bot.go index 
9c58093cb9897..e7fefa0107163 100644 --- a/integrations/access/slack/bot.go +++ b/integrations/access/slack/bot.go @@ -29,7 +29,6 @@ import ( "github.com/go-resty/resty/v2" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" @@ -37,6 +36,7 @@ import ( "github.com/gravitational/teleport/integrations/access/accessrequest" "github.com/gravitational/teleport/integrations/access/common" "github.com/gravitational/teleport/integrations/lib" + "github.com/gravitational/teleport/integrations/lib/logger" pd "github.com/gravitational/teleport/integrations/lib/plugindata" ) @@ -68,7 +68,7 @@ func onAfterResponseSlack(sink common.StatusSink) func(_ *resty.Client, resp *re ctx, cancel := context.WithTimeout(context.Background(), statusEmitTimeout) defer cancel() if err := sink.Emit(ctx, status); err != nil { - log.Errorf("Error while emitting plugin status: %v", err) + logger.Get(ctx).ErrorContext(ctx, "Error while emitting plugin status", "error", err) } }() @@ -139,7 +139,7 @@ func (b Bot) BroadcastAccessRequestMessage(ctx context.Context, recipients []com // the case with most SSO setups. userRecipient, err := b.FetchRecipient(ctx, reqData.User) if err != nil { - log.Warningf("Unable to find user %s in Slack, will not be able to notify.", reqData.User) + logger.Get(ctx).WarnContext(ctx, "Unable to find user in Slack, will not be able to notify", "user", reqData.User) } // Include the user in the list of recipients if it exists. 
diff --git a/integrations/access/slack/cmd/teleport-slack/main.go b/integrations/access/slack/cmd/teleport-slack/main.go index 1f77db5f21492..ffa73144f540b 100644 --- a/integrations/access/slack/cmd/teleport-slack/main.go +++ b/integrations/access/slack/cmd/teleport-slack/main.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -65,12 +66,13 @@ func main() { if err := run(*path, *debug); err != nil { lib.Bail(err) } else { - logger.Standard().Info("Successfully shut down") + slog.InfoContext(context.Background(), "Successfully shut down") } } } func run(configPath string, debug bool) error { + ctx := context.Background() conf, err := slack.LoadSlackConfig(configPath) if err != nil { return trace.Wrap(err) @@ -84,14 +86,15 @@ func run(configPath string, debug bool) error { return trace.Wrap(err) } if debug { - logger.Standard().Debugf("DEBUG logging enabled") + slog.DebugContext(ctx, "DEBUG logging enabled") } app := slack.NewSlackApp(conf) go lib.ServeSignals(app, common.PluginShutdownTimeout) - logger.Standard().Infof("Starting Teleport Access Slack Plugin %s:%s", teleport.Version, teleport.Gitref) - return trace.Wrap( - app.Run(context.Background()), + slog.InfoContext(ctx, "Starting Teleport Access Slack Plugin", + "version", teleport.Version, + "git_ref", teleport.Gitref, ) + return trace.Wrap(app.Run(ctx)) } diff --git a/integrations/access/slack/testlib/fake_slack.go b/integrations/access/slack/testlib/fake_slack.go index eef81460da7f1..d18a43230c744 100644 --- a/integrations/access/slack/testlib/fake_slack.go +++ b/integrations/access/slack/testlib/fake_slack.go @@ -31,7 +31,6 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/integrations/access/slack" ) @@ -315,6 +314,6 @@ func (s *FakeSlack) CheckMessageUpdateByResponding(ctx context.Context) (slack.M func panicIf(err error) { if err != nil { - 
log.Panicf("%v at %v", err, string(debug.Stack())) + panic(fmt.Sprintf("%v at %v", err, string(debug.Stack()))) } } diff --git a/integrations/event-handler/fake_fluentd_test.go b/integrations/event-handler/fake_fluentd_test.go index ecf286569f12d..72a363468ba15 100644 --- a/integrations/event-handler/fake_fluentd_test.go +++ b/integrations/event-handler/fake_fluentd_test.go @@ -31,8 +31,6 @@ import ( "github.com/gravitational/trace" "github.com/stretchr/testify/require" - - "github.com/gravitational/teleport/integrations/lib/logger" ) type FakeFluentd struct { @@ -150,7 +148,6 @@ func (f *FakeFluentd) GetURL() string { func (f *FakeFluentd) Respond(w http.ResponseWriter, r *http.Request) { req, err := io.ReadAll(r.Body) if err != nil { - logger.Standard().WithError(err).Error("FakeFluentd Respond() failed to read body") fmt.Fprintln(w, "NOK") return } diff --git a/integrations/event-handler/main.go b/integrations/event-handler/main.go index 859f6544c1e06..693b5bb24e036 100644 --- a/integrations/event-handler/main.go +++ b/integrations/event-handler/main.go @@ -46,8 +46,6 @@ const ( ) func main() { - // This initializes the legacy logrus logger. This has been kept in place - // in case any of the dependencies are still using logrus. logger.Init() ctx := kong.Parse( @@ -64,17 +62,13 @@ func main() { Format: "text", } if cli.Debug { - enableLogDebug() logCfg.Severity = "debug" } - log, err := logCfg.NewSLogLogger() - if err != nil { - fmt.Println(trace.DebugReport(trace.Wrap(err, "initializing logger"))) + + if err := logger.Setup(logCfg); err != nil { + fmt.Println(trace.DebugReport(err)) os.Exit(-1) } - // Whilst this package mostly dependency injects slog, upstream dependencies - // may still use the default slog logger. 
- slog.SetDefault(log) switch { case ctx.Command() == "version": @@ -86,25 +80,16 @@ func main() { os.Exit(-1) } case ctx.Command() == "start": - err := start(log) + err := start(slog.Default()) if err != nil { lib.Bail(err) } else { - log.InfoContext(context.TODO(), "Successfully shut down") + slog.InfoContext(context.TODO(), "Successfully shut down") } } } -// turn on log debugging -func enableLogDebug() { - err := logger.Setup(logger.Config{Severity: "debug", Output: "stderr"}) - if err != nil { - fmt.Println(trace.DebugReport(err)) - os.Exit(-1) - } -} - // start spawns the main process func start(log *slog.Logger) error { app, err := NewApp(&cli.Start, log) diff --git a/integrations/lib/bail.go b/integrations/lib/bail.go index 72804cd0ac3c4..d1351bb05f7fe 100644 --- a/integrations/lib/bail.go +++ b/integrations/lib/bail.go @@ -19,22 +19,24 @@ package lib import ( + "context" "errors" + "log/slog" "os" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" ) // Bail exits with nonzero exit code and prints an error to a log. 
func Bail(err error) { + ctx := context.Background() var agg trace.Aggregate if errors.As(trace.Unwrap(err), &agg) { for i, err := range agg.Errors() { - log.WithError(err).Errorf("Terminating with fatal error [%d]...", i+1) + slog.ErrorContext(ctx, "Terminating with fatal error", "error_number", i+1, "error", err) } } else { - log.WithError(err).Error("Terminating with fatal error...") + slog.ErrorContext(ctx, "Terminating with fatal error", "error", err) } os.Exit(1) } diff --git a/integrations/lib/config.go b/integrations/lib/config.go index 24f6c981e6686..66285167e5e36 100644 --- a/integrations/lib/config.go +++ b/integrations/lib/config.go @@ -22,12 +22,12 @@ import ( "context" "errors" "io" + "log/slog" "os" "strings" "time" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "google.golang.org/grpc" grpcbackoff "google.golang.org/grpc/backoff" @@ -137,7 +137,7 @@ func NewIdentityFileWatcher(ctx context.Context, path string, interval time.Dura } if err := dynamicCred.Reload(); err != nil { - log.WithError(err).Error("Failed to reload identity file from disk.") + slog.ErrorContext(ctx, "Failed to reload identity file from disk", "error", err) } timer.Reset(interval) } @@ -152,7 +152,7 @@ func (cfg TeleportConfig) NewClient(ctx context.Context) (*client.Client, error) case cfg.Addr != "": addr = cfg.Addr case cfg.AuthServer != "": - log.Warn("Configuration setting `auth_server` is deprecated, consider to change it to `addr`") + slog.WarnContext(ctx, "Configuration setting `auth_server` is deprecated, consider to change it to `addr`") addr = cfg.AuthServer } @@ -173,13 +173,13 @@ func (cfg TeleportConfig) NewClient(ctx context.Context) (*client.Client, error) } if validCred, err := credentials.CheckIfExpired(creds); err != nil { - log.Warn(err) + slog.WarnContext(ctx, "found expired credentials", "error", err) if !validCred { return nil, trace.BadParameter( "No valid credentials found, this likely means credentials are expired. 
In this case, please sign new credentials and increase their TTL if needed.", ) } - log.Info("At least one non-expired credential has been found, continuing startup") + slog.InfoContext(ctx, "At least one non-expired credential has been found, continuing startup") } bk := grpcbackoff.DefaultConfig diff --git a/integrations/lib/embeddedtbot/bot.go b/integrations/lib/embeddedtbot/bot.go index e693b40793fe5..b8ed026386114 100644 --- a/integrations/lib/embeddedtbot/bot.go +++ b/integrations/lib/embeddedtbot/bot.go @@ -26,7 +26,6 @@ import ( "time" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/client/proto" @@ -106,9 +105,9 @@ func (b *EmbeddedBot) start(ctx context.Context) { go func() { err := bot.Run(botCtx) if err != nil { - log.Errorf("bot exited with error: %s", err) + slog.ErrorContext(botCtx, "bot exited with error", "error", err) } else { - log.Infof("bot exited without error") + slog.InfoContext(botCtx, "bot exited without error") } b.errCh <- trace.Wrap(err) }() @@ -142,10 +141,10 @@ func (b *EmbeddedBot) waitForCredentials(ctx context.Context, deadline time.Dura select { case <-waitCtx.Done(): - log.Warn("context canceled while waiting for the bot client") + slog.WarnContext(ctx, "context canceled while waiting for the bot client") return nil, trace.Wrap(ctx.Err()) case <-b.credential.Ready(): - log.Infof("credential ready") + slog.InfoContext(ctx, "credential ready") } return b.credential, nil @@ -177,7 +176,7 @@ func (b *EmbeddedBot) StartAndWaitForCredentials(ctx context.Context, deadline t // buildClient reads tbot's memory disttination, retrieves the certificates // and builds a new Teleport client using those certs. 
func (b *EmbeddedBot) buildClient(ctx context.Context) (*client.Client, error) { - log.Infof("Building a new client to connect to %s", b.cfg.AuthServer) + slog.InfoContext(ctx, "Building a new client to connect to cluster", "auth_server_address", b.cfg.AuthServer) c, err := client.New(ctx, client.Config{ Addrs: []string{b.cfg.AuthServer}, Credentials: []client.Credentials{b.credential}, diff --git a/integrations/lib/http.go b/integrations/lib/http.go index dbb279913a5bd..6f98ad957a75c 100644 --- a/integrations/lib/http.go +++ b/integrations/lib/http.go @@ -24,6 +24,7 @@ import ( "crypto/x509" "errors" "fmt" + "log/slog" "net" "net/http" "net/url" @@ -33,7 +34,8 @@ import ( "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - log "github.com/sirupsen/logrus" + + logutils "github.com/gravitational/teleport/lib/utils/log" ) // TLSConfig stores TLS configuration for a http service @@ -178,7 +180,7 @@ func NewHTTP(config HTTPConfig) (*HTTP, error) { if verify := config.TLS.VerifyClientCertificateFunc; verify != nil { tlsConfig.VerifyPeerCertificate = func(_ [][]byte, chains [][]*x509.Certificate) error { if err := verify(chains); err != nil { - log.WithError(err).Error("HTTPS client certificate verification failed") + slog.ErrorContext(context.Background(), "HTTPS client certificate verification failed", "error", err) return err } return nil @@ -217,7 +219,7 @@ func BuildURLPath(args ...interface{}) string { // ListenAndServe runs a http(s) server on a provided port. 
func (h *HTTP) ListenAndServe(ctx context.Context) error { - defer log.Debug("HTTP server terminated") + defer slog.DebugContext(ctx, "HTTP server terminated") var err error h.server.BaseContext = func(_ net.Listener) context.Context { @@ -256,10 +258,10 @@ func (h *HTTP) ListenAndServe(ctx context.Context) error { } if h.Insecure { - log.Debugf("Starting insecure HTTP server on %s", addr) + slog.DebugContext(ctx, "Starting insecure HTTP server", "listen_addr", logutils.StringerAttr(addr)) err = h.server.Serve(listener) } else { - log.Debugf("Starting secure HTTPS server on %s", addr) + slog.DebugContext(ctx, "Starting secure HTTPS server", "listen_addr", logutils.StringerAttr(addr)) err = h.server.ServeTLS(listener, h.CertFile, h.KeyFile) } if errors.Is(err, http.ErrServerClosed) { @@ -288,7 +290,7 @@ func (h *HTTP) ServiceJob() ServiceJob { return NewServiceJob(func(ctx context.Context) error { MustGetProcess(ctx).OnTerminate(func(ctx context.Context) error { if err := h.ShutdownWithTimeout(ctx, time.Second*5); err != nil { - log.Error("HTTP server graceful shutdown failed") + slog.ErrorContext(ctx, "HTTP server graceful shutdown failed") return err } return nil diff --git a/integrations/lib/logger/logger.go b/integrations/lib/logger/logger.go index 7422f03ff906c..a1ce5bf7275ed 100644 --- a/integrations/lib/logger/logger.go +++ b/integrations/lib/logger/logger.go @@ -20,16 +20,11 @@ package logger import ( "context" - "io" - "io/fs" "log/slog" "os" - "strings" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" - "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/utils" logutils "github.com/gravitational/teleport/lib/utils/log" ) @@ -41,8 +36,6 @@ type Config struct { Format string `toml:"format"` } -type Fields = log.Fields - type contextKey struct{} var extraFields = []string{logutils.LevelField, logutils.ComponentField, logutils.CallerField} @@ -50,179 +43,50 @@ var extraFields = []string{logutils.LevelField, 
logutils.ComponentField, logutil // Init sets up logger for a typical daemon scenario until configuration // file is parsed func Init() { - formatter := &logutils.TextFormatter{ - EnableColors: utils.IsTerminal(os.Stderr), - ComponentPadding: 1, // We don't use components so strip the padding - ExtraFields: extraFields, - } - - log.SetOutput(os.Stderr) - if err := formatter.CheckAndSetDefaults(); err != nil { - log.WithError(err).Error("unable to create text log formatter") - return - } - - log.SetFormatter(formatter) + enableColors := utils.IsTerminal(os.Stderr) + logutils.Initialize(logutils.Config{ + Severity: slog.LevelInfo.String(), + Format: "text", + ExtraFields: extraFields, + EnableColors: enableColors, + Padding: 1, + }) } func Setup(conf Config) error { + var enableColors bool switch conf.Output { case "stderr", "error", "2": - log.SetOutput(os.Stderr) + enableColors = utils.IsTerminal(os.Stderr) case "", "stdout", "out", "1": - log.SetOutput(os.Stdout) + enableColors = utils.IsTerminal(os.Stdout) default: - // assume it's a file path: - logFile, err := os.Create(conf.Output) - if err != nil { - return trace.Wrap(err, "failed to create the log file") - } - log.SetOutput(logFile) } - switch strings.ToLower(conf.Severity) { - case "info": - log.SetLevel(log.InfoLevel) - case "err", "error": - log.SetLevel(log.ErrorLevel) - case "debug": - log.SetLevel(log.DebugLevel) - case "warn", "warning": - log.SetLevel(log.WarnLevel) - case "trace": - log.SetLevel(log.TraceLevel) - default: - return trace.BadParameter("unsupported logger severity: '%v'", conf.Severity) - } - - return nil + _, _, err := logutils.Initialize(logutils.Config{ + Output: conf.Output, + Severity: conf.Severity, + Format: conf.Format, + ExtraFields: extraFields, + EnableColors: enableColors, + Padding: 1, + }) + return trace.Wrap(err) } -// NewSLogLogger builds a slog.Logger from the logger.Config. 
-// TODO(tross): Defer logging initialization to logutils.Initialize and use the -// global slog loggers once integrations has been updated to use slog. -func (conf Config) NewSLogLogger() (*slog.Logger, error) { - const ( - // logFileDefaultMode is the preferred permissions mode for log file. - logFileDefaultMode fs.FileMode = 0o644 - // logFileDefaultFlag is the preferred flags set to log file. - logFileDefaultFlag = os.O_WRONLY | os.O_CREATE | os.O_APPEND - ) - - var w io.Writer - switch conf.Output { - case "": - w = logutils.NewSharedWriter(os.Stderr) - case "stderr", "error", "2": - w = logutils.NewSharedWriter(os.Stderr) - case "stdout", "out", "1": - w = logutils.NewSharedWriter(os.Stdout) - case teleport.Syslog: - w = os.Stderr - sw, err := logutils.NewSyslogWriter() - if err != nil { - slog.Default().ErrorContext(context.Background(), "Failed to switch logging to syslog", "error", err) - break - } - - // If syslog output has been configured and is supported by the operating system, - // then the shared writer is not needed because the syslog writer is already - // protected with a mutex. - w = sw - default: - // Assume this is a file path. 
- sharedWriter, err := logutils.NewFileSharedWriter(conf.Output, logFileDefaultFlag, logFileDefaultMode) - if err != nil { - return nil, trace.Wrap(err, "failed to init the log file shared writer") - } - w = logutils.NewWriterFinalizer[*logutils.FileSharedWriter](sharedWriter) - if err := sharedWriter.RunWatcherReopen(context.Background()); err != nil { - return nil, trace.Wrap(err) - } - } - - level := new(slog.LevelVar) - switch strings.ToLower(conf.Severity) { - case "", "info": - level.Set(slog.LevelInfo) - case "err", "error": - level.Set(slog.LevelError) - case teleport.DebugLevel: - level.Set(slog.LevelDebug) - case "warn", "warning": - level.Set(slog.LevelWarn) - case "trace": - level.Set(logutils.TraceLevel) - default: - return nil, trace.BadParameter("unsupported logger severity: %q", conf.Severity) - } - - configuredFields, err := logutils.ValidateFields(extraFields) - if err != nil { - return nil, trace.Wrap(err) - } - - var slogLogger *slog.Logger - switch strings.ToLower(conf.Format) { - case "": - fallthrough // not set. 
defaults to 'text' - case "text": - enableColors := utils.IsTerminal(os.Stderr) - slogLogger = slog.New(logutils.NewSlogTextHandler(w, logutils.SlogTextHandlerConfig{ - Level: level, - EnableColors: enableColors, - ConfiguredFields: configuredFields, - })) - slog.SetDefault(slogLogger) - case "json": - slogLogger = slog.New(logutils.NewSlogJSONHandler(w, logutils.SlogJSONHandlerConfig{ - Level: level, - ConfiguredFields: configuredFields, - })) - slog.SetDefault(slogLogger) - default: - return nil, trace.BadParameter("unsupported log output format : %q", conf.Format) - } - - return slogLogger, nil -} - -func WithLogger(ctx context.Context, logger log.FieldLogger) context.Context { - return withLogger(ctx, logger) -} - -func withLogger(ctx context.Context, logger log.FieldLogger) context.Context { +func WithLogger(ctx context.Context, logger *slog.Logger) context.Context { return context.WithValue(ctx, contextKey{}, logger) } -func WithField(ctx context.Context, key string, value interface{}) (context.Context, log.FieldLogger) { - logger := Get(ctx).WithField(key, value) - return withLogger(ctx, logger), logger +func With(ctx context.Context, args ...any) (context.Context, *slog.Logger) { + logger := Get(ctx).With(args...) 
+ return WithLogger(ctx, logger), logger } -func WithFields(ctx context.Context, logFields Fields) (context.Context, log.FieldLogger) { - logger := Get(ctx).WithFields(logFields) - return withLogger(ctx, logger), logger -} - -func SetField(ctx context.Context, key string, value interface{}) context.Context { - ctx, _ = WithField(ctx, key, value) - return ctx -} - -func SetFields(ctx context.Context, logFields Fields) context.Context { - ctx, _ = WithFields(ctx, logFields) - return ctx -} - -func Get(ctx context.Context) log.FieldLogger { - if logger, ok := ctx.Value(contextKey{}).(log.FieldLogger); ok && logger != nil { +func Get(ctx context.Context) *slog.Logger { + if logger, ok := ctx.Value(contextKey{}).(*slog.Logger); ok && logger != nil { return logger } - return Standard() -} - -func Standard() log.FieldLogger { - return log.StandardLogger() + return slog.Default() } diff --git a/integrations/lib/signals.go b/integrations/lib/signals.go index 4774915a6271b..4702455dfc7ca 100644 --- a/integrations/lib/signals.go +++ b/integrations/lib/signals.go @@ -20,12 +20,11 @@ package lib import ( "context" + "log/slog" "os" "os/signal" "syscall" "time" - - log "github.com/sirupsen/logrus" ) type Terminable interface { @@ -48,9 +47,9 @@ func ServeSignals(app Terminable, shutdownTimeout time.Duration) { gracefulShutdown := func() { tctx, tcancel := context.WithTimeout(ctx, shutdownTimeout) defer tcancel() - log.Infof("Attempting graceful shutdown...") + slog.InfoContext(tctx, "Attempting graceful shutdown") if err := app.Shutdown(tctx); err != nil { - log.Infof("Graceful shutdown failed. 
Trying fast shutdown...") + slog.InfoContext(tctx, "Graceful shutdown failed, attempting fast shutdown") app.Close() } } diff --git a/integrations/lib/tctl/tctl.go b/integrations/lib/tctl/tctl.go index 25e7e5e95e0da..5fa0a3252b45b 100644 --- a/integrations/lib/tctl/tctl.go +++ b/integrations/lib/tctl/tctl.go @@ -27,6 +27,7 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/integrations/lib/logger" + logutils "github.com/gravitational/teleport/lib/utils/log" ) var regexpStatusCAPin = regexp.MustCompile(`CA pin +(sha256:[a-zA-Z0-9]+)`) @@ -59,10 +60,14 @@ func (tctl Tctl) Sign(ctx context.Context, username, format, outPath string) err outPath, ) cmd := exec.CommandContext(ctx, tctl.cmd(), args...) - log.Debugf("Running %s", cmd) + log.DebugContext(ctx, "Running tctl auth sign", "command", logutils.StringerAttr(cmd)) output, err := cmd.CombinedOutput() if err != nil { - log.WithError(err).WithField("args", args).Debug("tctl auth sign failed:", string(output)) + log.DebugContext(ctx, "tctl auth sign failed", + "error", err, + "args", args, + "command_output", string(output), + ) return trace.Wrap(err, "tctl auth sign failed") } return nil @@ -73,7 +78,7 @@ func (tctl Tctl) Create(ctx context.Context, resources []types.Resource) error { log := logger.Get(ctx) args := append(tctl.baseArgs(), "create") cmd := exec.CommandContext(ctx, tctl.cmd(), args...) 
- log.Debugf("Running %s", cmd) + log.DebugContext(ctx, "Running tctl create", "command", logutils.StringerAttr(cmd)) stdinPipe, err := cmd.StdinPipe() if err != nil { return trace.Wrap(err, "failed to get stdin pipe") @@ -81,16 +86,19 @@ func (tctl Tctl) Create(ctx context.Context, resources []types.Resource) error { go func() { defer func() { if err := stdinPipe.Close(); err != nil { - log.WithError(trace.Wrap(err)).Error("Failed to close stdin pipe") + log.ErrorContext(ctx, "Failed to close stdin pipe", "error", err) } }() if err := writeResourcesYAML(stdinPipe, resources); err != nil { - log.WithError(trace.Wrap(err)).Error("Failed to serialize resources stdin") + log.ErrorContext(ctx, "Failed to serialize resources stdin", "error", err) } }() output, err := cmd.CombinedOutput() if err != nil { - log.WithError(err).Debug("tctl create failed:", string(output)) + log.DebugContext(ctx, "tctl create failed", + "error", err, + "command_output", string(output), + ) return trace.Wrap(err, "tctl create failed") } return nil @@ -102,7 +110,7 @@ func (tctl Tctl) GetAll(ctx context.Context, query string) ([]types.Resource, er args := append(tctl.baseArgs(), "get", query) cmd := exec.CommandContext(ctx, tctl.cmd(), args...) - log.Debugf("Running %s", cmd) + log.DebugContext(ctx, "Running tctl get", "command", logutils.StringerAttr(cmd)) stdoutPipe, err := cmd.StdoutPipe() if err != nil { return nil, trace.Wrap(err, "failed to get stdout") @@ -140,7 +148,7 @@ func (tctl Tctl) GetCAPin(ctx context.Context) (string, error) { args := append(tctl.baseArgs(), "status") cmd := exec.CommandContext(ctx, tctl.cmd(), args...) 
- log.Debugf("Running %s", cmd) + log.DebugContext(ctx, "Running tctl status", "command", logutils.StringerAttr(cmd)) output, err := cmd.Output() if err != nil { return "", trace.Wrap(err, "failed to get auth status") diff --git a/integrations/lib/testing/integration/suite.go b/integrations/lib/testing/integration/suite.go index 22c0754f66a3b..c0f03c647ef75 100644 --- a/integrations/lib/testing/integration/suite.go +++ b/integrations/lib/testing/integration/suite.go @@ -93,7 +93,7 @@ func (s *Suite) initContexts(oldT *testing.T, newT *testing.T) { } else { baseCtx = context.Background() } - baseCtx, _ = logger.WithField(baseCtx, "test", newT.Name()) + baseCtx, _ = logger.With(baseCtx, "test", newT.Name()) baseCtx, cancel := context.WithCancel(baseCtx) newT.Cleanup(cancel) @@ -163,7 +163,7 @@ func (s *Suite) StartApp(app AppI) { if err := app.Run(ctx); err != nil { // We're in a goroutine so we can't just require.NoError(t, err). // All we can do is to log an error. - logger.Get(ctx).WithError(err).Error("Application failed") + logger.Get(ctx).ErrorContext(ctx, "Application failed", "error", err) } }() diff --git a/integrations/lib/watcherjob/watcherjob.go b/integrations/lib/watcherjob/watcherjob.go index 2999b86aaad0b..a7d2d14482ae6 100644 --- a/integrations/lib/watcherjob/watcherjob.go +++ b/integrations/lib/watcherjob/watcherjob.go @@ -130,23 +130,23 @@ func newJobWithEvents(events types.Events, config Config, fn EventFunc, watchIni if config.FailFast { return trace.WrapWithMessage(err, "Connection problem detected. Exiting as fail fast is on.") } - log.WithError(err).Error("Connection problem detected. Attempting to reconnect.") + log.ErrorContext(ctx, "Connection problem detected, attempting to reconnect", "error", err) case errors.Is(err, io.EOF): if config.FailFast { return trace.WrapWithMessage(err, "Watcher stream closed. Exiting as fail fast is on.") } - log.WithError(err).Error("Watcher stream closed. 
Attempting to reconnect.") + log.ErrorContext(ctx, "Watcher stream closed attempting to reconnect", "error", err) case lib.IsCanceled(err): - log.Debug("Watcher context is canceled") + log.DebugContext(ctx, "Watcher context is canceled") return trace.Wrap(err) default: - log.WithError(err).Error("Watcher event loop failed") + log.ErrorContext(ctx, "Watcher event loop failed", "error", err) return trace.Wrap(err) } // To mitigate a potentially aggressive retry loop, we wait if err := bk.Do(ctx); err != nil { - log.Debug("Watcher context was canceled while waiting before a reconnection") + log.DebugContext(ctx, "Watcher context was canceled while waiting before a reconnection") return trace.Wrap(err) } } @@ -162,7 +162,7 @@ func (job job) watchEvents(ctx context.Context) error { } defer func() { if err := watcher.Close(); err != nil { - logger.Get(ctx).WithError(err).Error("Failed to close a watcher") + logger.Get(ctx).ErrorContext(ctx, "Failed to close a watcher", "error", err) } }() @@ -170,7 +170,7 @@ func (job job) watchEvents(ctx context.Context) error { return trace.Wrap(err) } - logger.Get(ctx).Debug("Watcher connected") + logger.Get(ctx).DebugContext(ctx, "Watcher connected") job.SetReady(true) for { @@ -253,7 +253,7 @@ func (job job) eventLoop(ctx context.Context) error { event := *eventPtr resource := event.Resource if resource == nil { - log.Error("received an event with empty resource field") + log.ErrorContext(ctx, "received an event with empty resource field") } key := eventKey{kind: resource.GetKind(), name: resource.GetName()} if queue, loaded := queues[key]; loaded { diff --git a/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/debug.go b/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/debug.go index b1c7c7339c4ba..585c82058d5fb 100644 --- a/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/debug.go +++ b/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/debug.go @@ -21,38 +21,37 @@ package main import ( + "context" + "log/slog" "os" - 
"github.com/gravitational/trace" - log "github.com/sirupsen/logrus" - crdgen "github.com/gravitational/teleport/integrations/operator/crdgen" + logutils "github.com/gravitational/teleport/lib/utils/log" ) func main() { - log.SetLevel(log.DebugLevel) - log.SetOutput(os.Stderr) + slog.SetDefault(slog.New(logutils.NewSlogTextHandler(os.Stderr, + logutils.SlogTextHandlerConfig{ + Level: slog.LevelDebug, + }, + ))) + ctx := context.Background() inputPath := os.Getenv(crdgen.PluginInputPathEnvironment) if inputPath == "" { - log.Error( - trace.BadParameter( - "When built with the 'debug' tag, the input path must be set through the environment variable: %s", - crdgen.PluginInputPathEnvironment, - ), - ) + slog.ErrorContext(ctx, "When built with the 'debug' tag, the input path must be set through the TELEPORT_PROTOC_READ_FILE environment variable") os.Exit(-1) } - log.Infof("This is a debug build, the protoc request is read from the file: '%s'", inputPath) + slog.InfoContext(ctx, "This is a debug build, the protoc request is read from the file", "input_path", inputPath) req, err := crdgen.ReadRequestFromFile(inputPath) if err != nil { - log.WithError(err).Error("error reading request from file") + slog.ErrorContext(ctx, "error reading request from file", "error", err) os.Exit(-1) } if err := crdgen.HandleDocsRequest(req); err != nil { - log.WithError(err).Error("Failed to generate docs") + slog.ErrorContext(ctx, "Failed to generate docs", "error", err) os.Exit(-1) } } diff --git a/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/main.go b/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/main.go index e091e5a8c1d0f..ac1be771b0bf0 100644 --- a/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/main.go +++ b/integrations/operator/crdgen/cmd/protoc-gen-crd-docs/main.go @@ -21,20 +21,26 @@ package main import ( + "context" + "log/slog" "os" "github.com/gogo/protobuf/vanity/command" - log "github.com/sirupsen/logrus" crdgen 
"github.com/gravitational/teleport/integrations/operator/crdgen" + logutils "github.com/gravitational/teleport/lib/utils/log" ) func main() { - log.SetLevel(log.DebugLevel) - log.SetOutput(os.Stderr) + slog.SetDefault(slog.New(logutils.NewSlogTextHandler(os.Stderr, + logutils.SlogTextHandlerConfig{ + Level: slog.LevelDebug, + }, + ))) + req := command.Read() if err := crdgen.HandleDocsRequest(req); err != nil { - log.WithError(err).Error("Failed to generate schema") + slog.ErrorContext(context.Background(), "Failed to generate schema", "error", err) os.Exit(-1) } } diff --git a/integrations/operator/crdgen/cmd/protoc-gen-crd/debug.go b/integrations/operator/crdgen/cmd/protoc-gen-crd/debug.go index bf19cf7eaca87..2da3e47ab9ec8 100644 --- a/integrations/operator/crdgen/cmd/protoc-gen-crd/debug.go +++ b/integrations/operator/crdgen/cmd/protoc-gen-crd/debug.go @@ -21,38 +21,37 @@ package main import ( + "context" + "log/slog" "os" - "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" - crdgen "github.com/gravitational/teleport/integrations/operator/crdgen" + logutils "github.com/gravitational/teleport/lib/utils/log" ) func main() { - log.SetLevel(log.DebugLevel) - log.SetOutput(os.Stderr) + slog.SetDefault(slog.New(logutils.NewSlogTextHandler(os.Stderr, + logutils.SlogTextHandlerConfig{ + Level: slog.LevelDebug, + }, + ))) + ctx := context.Background() inputPath := os.Getenv(crdgen.PluginInputPathEnvironment) if inputPath == "" { - log.Error( - trace.BadParameter( - "When built with the 'debug' tag, the input path must be set through the environment variable: %s", - crdgen.PluginInputPathEnvironment, - ), - ) + slog.ErrorContext(ctx, "When built with the 'debug' tag, the input path must be set through the TELEPORT_PROTOC_READ_FILE environment variable") os.Exit(-1) } - log.Infof("This is a debug build, the protoc request is read from the file: '%s'", inputPath) + slog.InfoContext(ctx, "This is a debug build, the protoc request is read from the file", 
"input_path", inputPath) req, err := crdgen.ReadRequestFromFile(inputPath) if err != nil { - log.WithError(err).Error("error reading request from file") + slog.ErrorContext(ctx, "error reading request from file", "error", err) os.Exit(-1) } if err := crdgen.HandleCRDRequest(req); err != nil { - log.WithError(err).Error("Failed to generate schema") + slog.ErrorContext(ctx, "Failed to generate schema", "error", err) os.Exit(-1) } } diff --git a/integrations/operator/crdgen/cmd/protoc-gen-crd/main.go b/integrations/operator/crdgen/cmd/protoc-gen-crd/main.go index 863af95862505..a557993626415 100644 --- a/integrations/operator/crdgen/cmd/protoc-gen-crd/main.go +++ b/integrations/operator/crdgen/cmd/protoc-gen-crd/main.go @@ -21,20 +21,26 @@ package main import ( + "context" + "log/slog" "os" "github.com/gogo/protobuf/vanity/command" - log "github.com/sirupsen/logrus" crdgen "github.com/gravitational/teleport/integrations/operator/crdgen" + logutils "github.com/gravitational/teleport/lib/utils/log" ) func main() { - log.SetLevel(log.DebugLevel) - log.SetOutput(os.Stderr) + slog.SetDefault(slog.New(logutils.NewSlogTextHandler(os.Stderr, + logutils.SlogTextHandlerConfig{ + Level: slog.LevelDebug, + }, + ))) + req := command.Read() if err := crdgen.HandleCRDRequest(req); err != nil { - log.WithError(err).Error("Failed to generate schema") + slog.ErrorContext(context.Background(), "Failed to generate schema", "error", err) os.Exit(-1) } } diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index d3240ffff8135..5222dc914a105 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -21,7 +21,6 @@ require ( github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1 github.com/jonboulle/clockwork v0.4.0 - github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.10.0 google.golang.org/grpc v1.69.2 google.golang.org/protobuf v1.36.2 @@ -307,6 +306,7 @@ require ( 
github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sijms/go-ora/v2 v2.8.22 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect diff --git a/integrations/terraform/provider/errors.go b/integrations/terraform/provider/errors.go index d31715366d192..6c0f838b474bf 100644 --- a/integrations/terraform/provider/errors.go +++ b/integrations/terraform/provider/errors.go @@ -17,9 +17,11 @@ limitations under the License. package provider import ( + "context" + "log/slog" + "github.com/gravitational/trace" "github.com/hashicorp/terraform-plugin-framework/diag" - log "github.com/sirupsen/logrus" ) // diagFromWrappedErr wraps error with additional information @@ -43,7 +45,7 @@ func diagFromWrappedErr(summary string, err error, kind string) diag.Diagnostic // diagFromErr converts error to diag.Diagnostics. If logging level is debug, provides trace.DebugReport instead of short text. 
func diagFromErr(summary string, err error) diag.Diagnostic { - if log.GetLevel() >= log.DebugLevel { + if slog.Default().Enabled(context.Background(), slog.LevelDebug) { return diag.NewErrorDiagnostic(err.Error(), trace.DebugReport(err)) } diff --git a/integrations/terraform/provider/provider.go b/integrations/terraform/provider/provider.go index 13b20d20c434f..99d460a49f806 100644 --- a/integrations/terraform/provider/provider.go +++ b/integrations/terraform/provider/provider.go @@ -19,6 +19,7 @@ package provider import ( "context" "fmt" + "log/slog" "net" "os" "strconv" @@ -29,13 +30,13 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" - log "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/constants" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -305,7 +306,7 @@ func (p *Provider) Configure(ctx context.Context, req tfsdk.ConfigureProviderReq return } - log.WithFields(log.Fields{"addr": addr}).Debug("Using Teleport address") + slog.DebugContext(ctx, "Using Teleport address", "addr", addr) dialTimeoutDuration, err := time.ParseDuration(dialTimeoutDurationStr) if err != nil { @@ -393,7 +394,7 @@ func (p *Provider) Configure(ctx context.Context, req tfsdk.ConfigureProviderReq // checkTeleportVersion ensures that Teleport version is at least minServerVersion func (p *Provider) checkTeleportVersion(ctx context.Context, client *client.Client, resp *tfsdk.ConfigureProviderResponse) bool { - log.Debug("Checking Teleport server version") + slog.DebugContext(ctx, "Checking Teleport server version") pong, err := client.Ping(ctx) if err != nil { if trace.IsNotImplemented(err) { @@ -403,13 +404,13 @@ func (p *Provider) checkTeleportVersion(ctx 
context.Context, client *client.Clie ) return false } - log.WithError(err).Debug("Teleport version check error!") + slog.DebugContext(ctx, "Teleport version check error", "error", err) resp.Diagnostics.AddError("Unable to get Teleport server version!", "Unable to get Teleport server version!") return false } err = utils.CheckMinVersion(pong.ServerVersion, minServerVersion) if err != nil { - log.WithError(err).Debug("Teleport version check error!") + slog.DebugContext(ctx, "Teleport version check error", "error", err) resp.Diagnostics.AddError("Teleport version check error!", err.Error()) return false } @@ -447,7 +448,7 @@ func (p *Provider) validateAddr(addr string, resp *tfsdk.ConfigureProviderRespon _, _, err := net.SplitHostPort(addr) if err != nil { - log.WithField("addr", addr).WithError(err).Debug("Teleport address format error!") + slog.DebugContext(context.Background(), "Teleport address format error", "error", err, "addr", addr) resp.Diagnostics.AddError( "Invalid Teleport address format", fmt.Sprintf("Teleport address must be specified as host:port. 
Got %q", addr), @@ -461,20 +462,32 @@ func (p *Provider) validateAddr(addr string, resp *tfsdk.ConfigureProviderRespon // configureLog configures logging func (p *Provider) configureLog() { + level := slog.LevelError // Get Terraform log level - level, err := log.ParseLevel(os.Getenv("TF_LOG")) - if err != nil { - log.SetLevel(log.ErrorLevel) - } else { - log.SetLevel(level) + switch strings.ToLower(os.Getenv("TF_LOG")) { + case "panic", "fatal", "error": + level = slog.LevelError + case "warn", "warning": + level = slog.LevelWarn + case "info": + level = slog.LevelInfo + case "debug": + level = slog.LevelDebug + case "trace": + level = logutils.TraceLevel } - log.SetFormatter(&log.TextFormatter{}) + _, _, err := logutils.Initialize(logutils.Config{ + Severity: level.String(), + Format: "text", + }) + if err != nil { + return + } // Show GRPC debug logs only if TF_LOG=DEBUG - if log.GetLevel() >= log.DebugLevel { - l := grpclog.NewLoggerV2(log.StandardLogger().Out, log.StandardLogger().Out, log.StandardLogger().Out) - grpclog.SetLoggerV2(l) + if level <= slog.LevelDebug { + grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) } } diff --git a/lib/utils/log/log.go b/lib/utils/log/log.go index 2f16b902e3df6..d8aadb75146bf 100644 --- a/lib/utils/log/log.go +++ b/lib/utils/log/log.go @@ -42,6 +42,8 @@ type Config struct { ExtraFields []string // EnableColors dictates if output should be colored. EnableColors bool + // Padding to use for various components. 
+ Padding int } // Initialize configures the default global logger based on the @@ -112,6 +114,7 @@ func Initialize(loggerConfig Config) (*slog.Logger, *slog.LevelVar, error) { Level: level, EnableColors: loggerConfig.EnableColors, ConfiguredFields: configuredFields, + Padding: loggerConfig.Padding, })) slog.SetDefault(logger) case "json": From 883c53a53cadf5885fb8eafec3e0f0f89c2f65a7 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Fri, 10 Jan 2025 11:58:56 -0500 Subject: [PATCH 42/45] Remove logrus dependency (#50930) This is the _last_ step required to migrate from logrus to slog. All components in the repository have been migrated to use slog allowing the logrus formatter to be deleted. The slog handler tests that validate the output have been updated to assert the format directly instead of comparing it to the output from the logrus formatter. --- .golangci.yml | 8 - go.mod | 2 +- lib/client/api.go | 2 +- lib/srv/desktop/rdp/rdpclient/client.go | 2 +- lib/utils/cli.go | 152 +-------- lib/utils/log/formatter_test.go | 420 +++++++++-------------- lib/utils/log/logrus_formatter.go | 427 ------------------------ lib/utils/log/slog.go | 20 -- lib/utils/log/slog_text_handler.go | 92 ++--- lib/utils/log/writer.go | 45 --- 10 files changed, 232 insertions(+), 938 deletions(-) delete mode 100644 lib/utils/log/logrus_formatter.go delete mode 100644 lib/utils/log/writer.go diff --git a/.golangci.yml b/.golangci.yml index 98859bad6c7d9..ecc5e7c8e253f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -115,14 +115,6 @@ linters-settings: desc: 'use "crypto" or "x/crypto" instead' # Prevent importing any additional logging libraries. logging: - files: - # Integrations are still allowed to use logrus becuase they haven't - # been converted to slog yet. Once they use slog, remove this exception. - - '!**/integrations/**' - # The log package still contains the logrus formatter consumed by the integrations. 
- # Remove this exception when said formatter is deleted. - - '!**/lib/utils/log/**' - - '!**/lib/utils/cli.go' deny: - pkg: github.com/sirupsen/logrus desc: 'use "log/slog" instead' diff --git a/go.mod b/go.mod index 3c35132910093..78f04732806b6 100644 --- a/go.mod +++ b/go.mod @@ -179,7 +179,6 @@ require ( github.com/sigstore/cosign/v2 v2.4.1 github.com/sigstore/sigstore v1.8.11 github.com/sijms/go-ora/v2 v2.8.22 - github.com/sirupsen/logrus v1.9.3 github.com/snowflakedb/gosnowflake v1.12.1 github.com/spf13/cobra v1.8.1 github.com/spiffe/go-spiffe/v2 v2.4.0 @@ -501,6 +500,7 @@ require ( github.com/sigstore/protobuf-specs v0.3.2 // indirect github.com/sigstore/rekor v1.3.6 // indirect github.com/sigstore/timestamp-authority v1.2.2 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.0 // indirect diff --git a/lib/client/api.go b/lib/client/api.go index ed94462aa9c73..8b4c317265573 100644 --- a/lib/client/api.go +++ b/lib/client/api.go @@ -2853,7 +2853,7 @@ type execResult struct { // sharedWriter is an [io.Writer] implementation that protects // writes with a mutex. This allows a single [io.Writer] to be shared -// by both logrus and slog without their output clobbering each other. +// by multiple command runners. 
type sharedWriter struct { mu sync.Mutex io.Writer diff --git a/lib/srv/desktop/rdp/rdpclient/client.go b/lib/srv/desktop/rdp/rdpclient/client.go index 821408d2208fa..534644e6be1df 100644 --- a/lib/srv/desktop/rdp/rdpclient/client.go +++ b/lib/srv/desktop/rdp/rdpclient/client.go @@ -93,7 +93,7 @@ func init() { var rustLogLevel string // initialize the Rust logger by setting $RUST_LOG based - // on the logrus log level + // on the slog log level // (unless RUST_LOG is already explicitly set, then we // assume the user knows what they want) rl := os.Getenv("RUST_LOG") diff --git a/lib/utils/cli.go b/lib/utils/cli.go index e79c0bc2aa8f0..648cf7095352f 100644 --- a/lib/utils/cli.go +++ b/lib/utils/cli.go @@ -26,7 +26,6 @@ import ( "flag" "fmt" "io" - stdlog "log" "log/slog" "os" "runtime" @@ -38,7 +37,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "golang.org/x/term" "github.com/gravitational/teleport" @@ -100,59 +98,18 @@ func InitLogger(purpose LoggingPurpose, level slog.Level, opts ...LoggerOption) opt(&o) } - logrus.StandardLogger().ReplaceHooks(make(logrus.LevelHooks)) - logrus.SetLevel(logutils.SlogLevelToLogrusLevel(level)) - - var ( - w io.Writer - enableColors bool - ) - switch purpose { - case LoggingForCLI: - // If debug logging was asked for on the CLI, then write logs to stderr. - // Otherwise, discard all logs. - if level == slog.LevelDebug { - enableColors = IsTerminal(os.Stderr) - w = logutils.NewSharedWriter(os.Stderr) - } else { - w = io.Discard - enableColors = false - } - case LoggingForDaemon: - enableColors = IsTerminal(os.Stderr) - w = logutils.NewSharedWriter(os.Stderr) - } - - var ( - formatter logrus.Formatter - handler slog.Handler - ) - switch o.format { - case LogFormatText, "": - textFormatter := logutils.NewDefaultTextFormatter(enableColors) - - // Calling CheckAndSetDefaults enables the timestamp field to - // be included in the output. 
The error returned is ignored - // because the default formatter cannot be invalid. - if purpose == LoggingForCLI && level == slog.LevelDebug { - _ = textFormatter.CheckAndSetDefaults() - } - - formatter = textFormatter - handler = logutils.NewSlogTextHandler(w, logutils.SlogTextHandlerConfig{ - Level: level, - EnableColors: enableColors, - }) - case LogFormatJSON: - formatter = &logutils.JSONFormatter{} - handler = logutils.NewSlogJSONHandler(w, logutils.SlogJSONHandlerConfig{ - Level: level, - }) + // If debug or trace logging is not enabled for CLIs, + // then discard all log output. + if purpose == LoggingForCLI && level > slog.LevelDebug { + slog.SetDefault(slog.New(logutils.DiscardHandler{})) + return } - logrus.SetFormatter(formatter) - logrus.SetOutput(w) - slog.SetDefault(slog.New(handler)) + logutils.Initialize(logutils.Config{ + Severity: level.String(), + Format: o.format, + EnableColors: IsTerminal(os.Stderr), + }) } var initTestLoggerOnce = sync.Once{} @@ -163,56 +120,24 @@ func InitLoggerForTests() { // Parse flags to check testing.Verbose(). flag.Parse() - level := slog.LevelWarn - w := io.Discard - if testing.Verbose() { - level = slog.LevelDebug - w = os.Stderr + if !testing.Verbose() { + slog.SetDefault(slog.New(logutils.DiscardHandler{})) + return } - logger := logrus.StandardLogger() - logger.SetFormatter(logutils.NewTestJSONFormatter()) - logger.SetLevel(logutils.SlogLevelToLogrusLevel(level)) - - output := logutils.NewSharedWriter(w) - logger.SetOutput(output) - slog.SetDefault(slog.New(logutils.NewSlogJSONHandler(output, logutils.SlogJSONHandlerConfig{Level: level}))) + logutils.Initialize(logutils.Config{ + Severity: slog.LevelDebug.String(), + Format: LogFormatJSON, + }) }) } -// NewLoggerForTests creates a new logrus logger for test environments. -func NewLoggerForTests() *logrus.Logger { - InitLoggerForTests() - return logrus.StandardLogger() -} - // NewSlogLoggerForTests creates a new slog logger for test environments. 
func NewSlogLoggerForTests() *slog.Logger { InitLoggerForTests() return slog.Default() } -// WrapLogger wraps an existing logger entry and returns -// a value satisfying the Logger interface -func WrapLogger(logger *logrus.Entry) Logger { - return &logWrapper{Entry: logger} -} - -// NewLogger creates a new empty logrus logger. -func NewLogger() *logrus.Logger { - return logrus.StandardLogger() -} - -// Logger describes a logger value -type Logger interface { - logrus.FieldLogger - // GetLevel specifies the level at which this logger - // value is logging - GetLevel() logrus.Level - // SetLevel sets the logger's level to the specified value - SetLevel(level logrus.Level) -} - // FatalError is for CLI front-ends: it detects gravitational/trace debugging // information, sends it to the logger, strips it off and prints a clean message to stderr func FatalError(err error) { @@ -231,7 +156,7 @@ func GetIterations() int { if err != nil { panic(err) } - logrus.Debugf("Starting tests with %v iterations.", iter) + slog.DebugContext(context.Background(), "Running tests multiple times due to presence of ITERATIONS environment variable", "iterations", iter) return iter } @@ -484,47 +409,6 @@ func AllowWhitespace(s string) string { return sb.String() } -// NewStdlogger creates a new stdlib logger that uses the specified leveled logger -// for output and the given component as a logging prefix. -func NewStdlogger(logger LeveledOutputFunc, component string) *stdlog.Logger { - return stdlog.New(&stdlogAdapter{ - log: logger, - }, component, stdlog.LstdFlags) -} - -// Write writes the specified buffer p to the underlying leveled logger. 
-// Implements io.Writer -func (r *stdlogAdapter) Write(p []byte) (n int, err error) { - r.log(string(p)) - return len(p), nil -} - -// stdlogAdapter is an io.Writer that writes into an instance -// of logrus.Logger -type stdlogAdapter struct { - log LeveledOutputFunc -} - -// LeveledOutputFunc describes a function that emits given -// arguments at a specific level to an underlying logger -type LeveledOutputFunc func(args ...interface{}) - -// GetLevel returns the level of the underlying logger -func (r *logWrapper) GetLevel() logrus.Level { - return r.Entry.Logger.GetLevel() -} - -// SetLevel sets the logging level to the given value -func (r *logWrapper) SetLevel(level logrus.Level) { - r.Entry.Logger.SetLevel(level) -} - -// logWrapper wraps a log entry. -// Implements Logger -type logWrapper struct { - *logrus.Entry -} - // needsQuoting returns true if any non-printable characters are found. func needsQuoting(text string) bool { for _, r := range text { diff --git a/lib/utils/log/formatter_test.go b/lib/utils/log/formatter_test.go index 9abb0310ba0be..aff0ec8be3a74 100644 --- a/lib/utils/log/formatter_test.go +++ b/lib/utils/log/formatter_test.go @@ -22,7 +22,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "io" "log/slog" @@ -38,7 +37,6 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -48,7 +46,7 @@ import ( const message = "Adding diagnostic debugging handlers.\t To connect with profiler, use go tool pprof diag_addr." 
var ( - logErr = errors.New("the quick brown fox jumped really high") + logErr = &trace.BadParameterError{Message: "the quick brown fox jumped really high"} addr = fakeAddr{addr: "127.0.0.1:1234"} fields = map[string]any{ @@ -72,6 +70,10 @@ func (a fakeAddr) String() string { return a.addr } +func (a fakeAddr) MarshalText() (text []byte, err error) { + return []byte(a.addr), nil +} + func TestOutput(t *testing.T) { loc, err := time.LoadLocation("Africa/Cairo") require.NoError(t, err, "failed getting timezone") @@ -89,58 +91,50 @@ func TestOutput(t *testing.T) { // 4) the caller outputRegex := regexp.MustCompile(`(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z)(\s+.*)(".*diag_addr\.")(.*)(\slog/formatter_test.go:\d{3})`) + expectedFields := map[string]string{ + "local": addr.String(), + "remote": addr.String(), + "login": "llama", + "teleportUser": "user", + "id": "1234", + "test": "123", + "animal": `"llama\n"`, + "error": "[" + trace.DebugReport(logErr) + "]", + "diag_addr": addr.String(), + } + tests := []struct { - name string - logrusLevel logrus.Level - slogLevel slog.Level + name string + slogLevel slog.Level }{ { - name: "trace", - logrusLevel: logrus.TraceLevel, - slogLevel: TraceLevel, + name: "trace", + slogLevel: TraceLevel, }, { - name: "debug", - logrusLevel: logrus.DebugLevel, - slogLevel: slog.LevelDebug, + name: "debug", + slogLevel: slog.LevelDebug, }, { - name: "info", - logrusLevel: logrus.InfoLevel, - slogLevel: slog.LevelInfo, + name: "info", + slogLevel: slog.LevelInfo, }, { - name: "warn", - logrusLevel: logrus.WarnLevel, - slogLevel: slog.LevelWarn, + name: "warn", + slogLevel: slog.LevelWarn, }, { - name: "error", - logrusLevel: logrus.ErrorLevel, - slogLevel: slog.LevelError, + name: "error", + slogLevel: slog.LevelError, }, { - name: "fatal", - logrusLevel: logrus.FatalLevel, - slogLevel: slog.LevelError + 1, + name: "fatal", + slogLevel: slog.LevelError + 1, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Create 
a logrus logger using the custom formatter which outputs to a local buffer. - var logrusOutput bytes.Buffer - formatter := NewDefaultTextFormatter(true) - formatter.timestampEnabled = true - require.NoError(t, formatter.CheckAndSetDefaults()) - - logrusLogger := logrus.New() - logrusLogger.SetFormatter(formatter) - logrusLogger.SetOutput(&logrusOutput) - logrusLogger.ReplaceHooks(logrus.LevelHooks{}) - logrusLogger.SetLevel(test.logrusLevel) - entry := logrusLogger.WithField(teleport.ComponentKey, "test").WithTime(clock.Now().UTC()) - // Create a slog logger using the custom handler which outputs to a local buffer. var slogOutput bytes.Buffer slogConfig := SlogTextHandlerConfig{ @@ -155,13 +149,6 @@ func TestOutput(t *testing.T) { } slogLogger := slog.New(NewSlogTextHandler(&slogOutput, slogConfig)).With(teleport.ComponentKey, "test") - // Add some fields and output the message at the desired log level via logrus. - l := entry.WithField("test", 123).WithField("animal", "llama\n").WithField("error", logErr) - logrusTestLogLineNumber := func() int { - l.WithField("diag_addr", &addr).WithField(teleport.ComponentFields, fields).Log(test.logrusLevel, message) - return getCallerLineNumber() - 1 // Get the line number of this call, and assume the log call is right above it - }() - // Add some fields and output the message at the desired log level via slog. l2 := slogLogger.With("test", 123).With("animal", "llama\n").With("error", logErr) slogTestLogLineNumber := func() int { @@ -169,163 +156,144 @@ func TestOutput(t *testing.T) { return getCallerLineNumber() - 1 // Get the line number of this call, and assume the log call is right above it }() - // Validate that both loggers produces the same output. The added complexity comes from the fact that - // our custom slog handler does NOT sort the additional fields like our logrus formatter does. 
- logrusMatches := outputRegex.FindStringSubmatch(logrusOutput.String()) - require.NotEmpty(t, logrusMatches, "logrus output was in unexpected format: %s", logrusOutput.String()) + // Validate the logger output. The added complexity comes from the fact that + // our custom slog handler does NOT sort the additional fields. slogMatches := outputRegex.FindStringSubmatch(slogOutput.String()) require.NotEmpty(t, slogMatches, "slog output was in unexpected format: %s", slogOutput.String()) // The first match is the timestamp: 2023-10-31T10:09:06+02:00 - logrusTime, err := time.Parse(time.RFC3339, logrusMatches[1]) - assert.NoError(t, err, "invalid logrus timestamp found %s", logrusMatches[1]) - slogTime, err := time.Parse(time.RFC3339, slogMatches[1]) assert.NoError(t, err, "invalid slog timestamp found %s", slogMatches[1]) - - assert.InDelta(t, logrusTime.Unix(), slogTime.Unix(), 10) + assert.InDelta(t, clock.Now().Unix(), slogTime.Unix(), 10) // Match level, and component: DEBU [TEST] - assert.Empty(t, cmp.Diff(logrusMatches[2], slogMatches[2]), "level, and component to be identical") - // Match the log message: "Adding diagnostic debugging handlers.\t To connect with profiler, use go tool pprof diag_addr.\n" - assert.Empty(t, cmp.Diff(logrusMatches[3], slogMatches[3]), "expected output messages to be identical") + expectedLevel := formatLevel(test.slogLevel, true) + expectedComponent := formatComponent(slog.StringValue("test"), defaultComponentPadding) + expectedMatch := " " + expectedLevel + " " + expectedComponent + " " + assert.Equal(t, expectedMatch, slogMatches[2], "level, and component to be identical") + // Match the log message + assert.Equal(t, `"Adding diagnostic debugging handlers.\t To connect with profiler, use go tool pprof diag_addr."`, slogMatches[3], "expected output messages to be identical") // The last matches are the caller information - assert.Equal(t, fmt.Sprintf(" log/formatter_test.go:%d", logrusTestLogLineNumber), logrusMatches[5]) 
assert.Equal(t, fmt.Sprintf(" log/formatter_test.go:%d", slogTestLogLineNumber), slogMatches[5]) // The third matches are the fields which will be key value pairs(animal:llama) separated by a space. Since - // logrus sorts the fields and slog doesn't we can't just assert equality and instead build a map of the key + // slog doesn't sort the fields, we can't assert equality and instead build a map of the key // value pairs to ensure they are all present and accounted for. - logrusFieldMatches := fieldsRegex.FindAllStringSubmatch(logrusMatches[4], -1) slogFieldMatches := fieldsRegex.FindAllStringSubmatch(slogMatches[4], -1) // The first match is the key, the second match is the value - logrusFields := map[string]string{} - for _, match := range logrusFieldMatches { - logrusFields[strings.TrimSpace(match[1])] = strings.TrimSpace(match[2]) - } - slogFields := map[string]string{} for _, match := range slogFieldMatches { slogFields[strings.TrimSpace(match[1])] = strings.TrimSpace(match[2]) } - assert.Equal(t, slogFields, logrusFields) + require.Empty(t, + cmp.Diff( + expectedFields, + slogFields, + cmpopts.SortMaps(func(a, b string) bool { return a < b }), + ), + ) }) } }) t.Run("json", func(t *testing.T) { tests := []struct { - name string - logrusLevel logrus.Level - slogLevel slog.Level + name string + slogLevel slog.Level }{ { - name: "trace", - logrusLevel: logrus.TraceLevel, - slogLevel: TraceLevel, + name: "trace", + slogLevel: TraceLevel, }, { - name: "debug", - logrusLevel: logrus.DebugLevel, - slogLevel: slog.LevelDebug, + name: "debug", + slogLevel: slog.LevelDebug, }, { - name: "info", - logrusLevel: logrus.InfoLevel, - slogLevel: slog.LevelInfo, + name: "info", + slogLevel: slog.LevelInfo, }, { - name: "warn", - logrusLevel: logrus.WarnLevel, - slogLevel: slog.LevelWarn, + name: "warn", + slogLevel: slog.LevelWarn, }, { - name: "error", - logrusLevel: logrus.ErrorLevel, - slogLevel: slog.LevelError, + name: "error", + slogLevel: slog.LevelError, }, { - name: 
"fatal", - logrusLevel: logrus.FatalLevel, - slogLevel: slog.LevelError + 1, + name: "fatal", + slogLevel: slog.LevelError + 1, + }, + } + + expectedFields := map[string]any{ + "trace.fields": map[string]any{ + "teleportUser": "user", + "id": float64(1234), + "local": addr.String(), + "login": "llama", + "remote": addr.String(), }, + "test": float64(123), + "animal": `llama`, + "error": logErr.Error(), + "diag_addr": addr.String(), + "component": "test", + "message": message, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Create a logrus logger using the custom formatter which outputs to a local buffer. - var logrusOut bytes.Buffer - formatter := &JSONFormatter{ - ExtraFields: nil, - callerEnabled: true, - } - require.NoError(t, formatter.CheckAndSetDefaults()) - - logrusLogger := logrus.New() - logrusLogger.SetFormatter(formatter) - logrusLogger.SetOutput(&logrusOut) - logrusLogger.ReplaceHooks(logrus.LevelHooks{}) - logrusLogger.SetLevel(test.logrusLevel) - entry := logrusLogger.WithField(teleport.ComponentKey, "test") - // Create a slog logger using the custom formatter which outputs to a local buffer. var slogOutput bytes.Buffer slogLogger := slog.New(NewSlogJSONHandler(&slogOutput, SlogJSONHandlerConfig{Level: test.slogLevel})).With(teleport.ComponentKey, "test") - // Add some fields and output the message at the desired log level via logrus. - l := entry.WithField("test", 123).WithField("animal", "llama").WithField("error", trace.Wrap(logErr)) - logrusTestLogLineNumber := func() int { - l.WithField("diag_addr", addr.String()).Log(test.logrusLevel, message) - return getCallerLineNumber() - 1 // Get the line number of this call, and assume the log call is right above it - }() - // Add some fields and output the message at the desired log level via slog. 
l2 := slogLogger.With("test", 123).With("animal", "llama").With("error", trace.Wrap(logErr)) slogTestLogLineNumber := func() int { - l2.Log(context.Background(), test.slogLevel, message, "diag_addr", &addr) + l2.With(teleport.ComponentFields, fields).Log(context.Background(), test.slogLevel, message, "diag_addr", &addr) return getCallerLineNumber() - 1 // Get the line number of this call, and assume the log call is right above it }() - // The order of the fields emitted by the two loggers is different, so comparing the output directly - // for equality won't work. Instead, a map is built with all the key value pairs, excluding the caller - // and that map is compared to ensure all items are present and match. - var logrusData map[string]any - require.NoError(t, json.Unmarshal(logrusOut.Bytes(), &logrusData), "invalid logrus output format") - var slogData map[string]any require.NoError(t, json.Unmarshal(slogOutput.Bytes(), &slogData), "invalid slog output format") - logrusCaller, ok := logrusData["caller"].(string) - delete(logrusData, "caller") - assert.True(t, ok, "caller was missing from logrus output") - assert.Equal(t, fmt.Sprintf("log/formatter_test.go:%d", logrusTestLogLineNumber), logrusCaller) - slogCaller, ok := slogData["caller"].(string) delete(slogData, "caller") assert.True(t, ok, "caller was missing from slog output") assert.Equal(t, fmt.Sprintf("log/formatter_test.go:%d", slogTestLogLineNumber), slogCaller) - logrusTimestamp, ok := logrusData["timestamp"].(string) - delete(logrusData, "timestamp") - assert.True(t, ok, "time was missing from logrus output") + slogLevel, ok := slogData["level"].(string) + delete(slogData, "level") + assert.True(t, ok, "level was missing from slog output") + var expectedLevel string + switch test.slogLevel { + case TraceLevel: + expectedLevel = "trace" + case slog.LevelWarn: + expectedLevel = "warning" + case slog.LevelError + 1: + expectedLevel = "fatal" + default: + expectedLevel = test.slogLevel.String() + } + 
assert.Equal(t, strings.ToLower(expectedLevel), slogLevel) slogTimestamp, ok := slogData["timestamp"].(string) delete(slogData, "timestamp") assert.True(t, ok, "time was missing from slog output") - logrusTime, err := time.Parse(time.RFC3339, logrusTimestamp) - assert.NoError(t, err, "invalid logrus timestamp %s", logrusTimestamp) - slogTime, err := time.Parse(time.RFC3339, slogTimestamp) assert.NoError(t, err, "invalid slog timestamp %s", slogTimestamp) - assert.InDelta(t, logrusTime.Unix(), slogTime.Unix(), 10) + assert.InDelta(t, clock.Now().Unix(), slogTime.Unix(), 10) require.Empty(t, cmp.Diff( - logrusData, + expectedFields, slogData, cmpopts.SortMaps(func(a, b string) bool { return a < b }), ), @@ -347,38 +315,6 @@ func getCallerLineNumber() int { func BenchmarkFormatter(b *testing.B) { ctx := context.Background() b.ReportAllocs() - b.Run("logrus", func(b *testing.B) { - b.Run("text", func(b *testing.B) { - formatter := NewDefaultTextFormatter(true) - require.NoError(b, formatter.CheckAndSetDefaults()) - logger := logrus.New() - logger.SetFormatter(formatter) - logger.SetOutput(io.Discard) - b.ResetTimer() - - entry := logger.WithField(teleport.ComponentKey, "test") - for i := 0; i < b.N; i++ { - l := entry.WithField("test", 123).WithField("animal", "llama\n").WithField("error", logErr) - l.WithField("diag_addr", &addr).WithField(teleport.ComponentFields, fields).Info(message) - } - }) - - b.Run("json", func(b *testing.B) { - formatter := &JSONFormatter{} - require.NoError(b, formatter.CheckAndSetDefaults()) - logger := logrus.New() - logger.SetFormatter(formatter) - logger.SetOutput(io.Discard) - logger.ReplaceHooks(logrus.LevelHooks{}) - b.ResetTimer() - - entry := logger.WithField(teleport.ComponentKey, "test") - for i := 0; i < b.N; i++ { - l := entry.WithField("test", 123).WithField("animal", "llama\n").WithField("error", logErr) - l.WithField("diag_addr", &addr).WithField(teleport.ComponentFields, fields).Info(message) - } - }) - }) b.Run("slog", 
func(b *testing.B) { b.Run("default_text", func(b *testing.B) { @@ -430,47 +366,26 @@ func BenchmarkFormatter(b *testing.B) { } func TestConcurrentOutput(t *testing.T) { - t.Run("logrus", func(t *testing.T) { - debugFormatter := NewDefaultTextFormatter(true) - require.NoError(t, debugFormatter.CheckAndSetDefaults()) - logrus.SetFormatter(debugFormatter) - logrus.SetOutput(os.Stdout) - - logger := logrus.WithField(teleport.ComponentKey, "test") - - var wg sync.WaitGroup - for i := 0; i < 1000; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - logger.Infof("Detected Teleport component %d is running in a degraded state.", i) - }(i) - } - wg.Wait() - }) + logger := slog.New(NewSlogTextHandler(os.Stdout, SlogTextHandlerConfig{ + EnableColors: true, + })).With(teleport.ComponentKey, "test") - t.Run("slog", func(t *testing.T) { - logger := slog.New(NewSlogTextHandler(os.Stdout, SlogTextHandlerConfig{ - EnableColors: true, - })).With(teleport.ComponentKey, "test") - - var wg sync.WaitGroup - ctx := context.Background() - for i := 0; i < 1000; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - logger.InfoContext(ctx, "Teleport component entered degraded state", - slog.Int("component", i), - slog.Group("group", - slog.String("test", "123"), - slog.String("animal", "llama"), - ), - ) - }(i) - } - wg.Wait() - }) + var wg sync.WaitGroup + ctx := context.Background() + for i := 0; i < 1000; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + logger.InfoContext(ctx, "Teleport component entered degraded state", + slog.Int("component", i), + slog.Group("group", + slog.String("test", "123"), + slog.String("animal", "llama"), + ), + ) + }(i) + } + wg.Wait() } // allPossibleSubsets returns all combinations of subsets for the @@ -493,58 +408,34 @@ func allPossibleSubsets(in []string) [][]string { return subsets } -// TestExtraFields validates that the output is identical for the -// logrus formatter and slog handler based on the configured extra -// fields. 
+// TestExtraFields validates that the output is expected for the +// slog handler based on the configured extra fields. func TestExtraFields(t *testing.T) { // Capture a fake time that all output will use. now := clockwork.NewFakeClock().Now() // Capture the caller information to be injected into all messages. pc, _, _, _ := runtime.Caller(0) - fs := runtime.CallersFrames([]uintptr{pc}) - f, _ := fs.Next() - callerTrace := &trace.Trace{ - Func: f.Function, - Path: f.File, - Line: f.Line, - } const message = "testing 123" - // Test against every possible configured combination of allowed format fields. - fields := allPossibleSubsets(defaultFormatFields) - t.Run("text", func(t *testing.T) { - for _, configuredFields := range fields { + // Test against every possible configured combination of allowed format fields. + for _, configuredFields := range allPossibleSubsets(defaultFormatFields) { name := "not configured" if len(configuredFields) > 0 { name = strings.Join(configuredFields, " ") } t.Run(name, func(t *testing.T) { - logrusFormatter := TextFormatter{ - ExtraFields: configuredFields, - } - // Call CheckAndSetDefaults to exercise the extra fields logic. Since - // FormatCaller is always overridden within CheckAndSetDefaults, it is - // explicitly set afterward so the caller points to our fake call site. 
- require.NoError(t, logrusFormatter.CheckAndSetDefaults()) - logrusFormatter.FormatCaller = callerTrace.String - - var slogOutput bytes.Buffer - var slogHandler slog.Handler = NewSlogTextHandler(&slogOutput, SlogTextHandlerConfig{ConfiguredFields: configuredFields}) - - entry := &logrus.Entry{ - Data: logrus.Fields{"animal": "llama", "vegetable": "carrot", teleport.ComponentKey: "test"}, - Time: now, - Level: logrus.DebugLevel, - Caller: &f, - Message: message, - } - - logrusOut, err := logrusFormatter.Format(entry) - require.NoError(t, err) + replaced := map[string]struct{}{} + var slogHandler slog.Handler = NewSlogTextHandler(io.Discard, SlogTextHandlerConfig{ + ConfiguredFields: configuredFields, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + replaced[a.Key] = struct{}{} + return a + }, + }) record := slog.Record{ Time: now, @@ -557,42 +448,29 @@ func TestExtraFields(t *testing.T) { require.NoError(t, slogHandler.Handle(context.Background(), record)) - require.Equal(t, string(logrusOut), slogOutput.String()) + for k := range replaced { + delete(replaced, k) + } + + require.Empty(t, replaced, replaced) }) } }) t.Run("json", func(t *testing.T) { - for _, configuredFields := range fields { + // Test against every possible configured combination of allowed format fields. + // Note, the json handler limits the allowed fields to a subset of those allowed + // by the text handler. + for _, configuredFields := range allPossibleSubsets([]string{CallerField, ComponentField, TimestampField}) { name := "not configured" if len(configuredFields) > 0 { name = strings.Join(configuredFields, " ") } t.Run(name, func(t *testing.T) { - logrusFormatter := JSONFormatter{ - ExtraFields: configuredFields, - } - // Call CheckAndSetDefaults to exercise the extra fields logic. Since - // FormatCaller is always overridden within CheckAndSetDefaults, it is - // explicitly set afterward so the caller points to our fake call site. 
- require.NoError(t, logrusFormatter.CheckAndSetDefaults()) - logrusFormatter.FormatCaller = callerTrace.String - var slogOutput bytes.Buffer var slogHandler slog.Handler = NewSlogJSONHandler(&slogOutput, SlogJSONHandlerConfig{ConfiguredFields: configuredFields}) - entry := &logrus.Entry{ - Data: logrus.Fields{"animal": "llama", "vegetable": "carrot", teleport.ComponentKey: "test"}, - Time: now, - Level: logrus.DebugLevel, - Caller: &f, - Message: message, - } - - logrusOut, err := logrusFormatter.Format(entry) - require.NoError(t, err) - record := slog.Record{ Time: now, Message: message, @@ -604,11 +482,31 @@ func TestExtraFields(t *testing.T) { require.NoError(t, slogHandler.Handle(context.Background(), record)) - var slogData, logrusData map[string]any - require.NoError(t, json.Unmarshal(logrusOut, &logrusData)) + var slogData map[string]any require.NoError(t, json.Unmarshal(slogOutput.Bytes(), &slogData)) - require.Equal(t, slogData, logrusData) + delete(slogData, "animal") + delete(slogData, "vegetable") + delete(slogData, "message") + delete(slogData, "level") + + var expectedLen int + expectedFields := configuredFields + switch l := len(configuredFields); l { + case 0: + // The level field was removed above, but is included in the default fields + expectedLen = len(defaultFormatFields) - 1 + expectedFields = defaultFormatFields + default: + expectedLen = l + } + require.Len(t, slogData, expectedLen, slogData) + + for _, f := range expectedFields { + delete(slogData, f) + } + + require.Empty(t, slogData, slogData) }) } }) diff --git a/lib/utils/log/logrus_formatter.go b/lib/utils/log/logrus_formatter.go deleted file mode 100644 index 14ad8441da7cc..0000000000000 --- a/lib/utils/log/logrus_formatter.go +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. 
- * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package log - -import ( - "fmt" - "regexp" - "runtime" - "slices" - "strconv" - "strings" - - "github.com/gravitational/trace" - "github.com/sirupsen/logrus" - - "github.com/gravitational/teleport" -) - -// TextFormatter is a [logrus.Formatter] that outputs messages in -// a textual format. -type TextFormatter struct { - // ComponentPadding is a padding to pick when displaying - // and formatting component field, defaults to DefaultComponentPadding - ComponentPadding int - // EnableColors enables colored output - EnableColors bool - // FormatCaller is a function to return (part) of source file path for output. 
- // Defaults to filePathAndLine() if unspecified - FormatCaller func() (caller string) - // ExtraFields represent the extra fields that will be added to the log message - ExtraFields []string - // TimestampEnabled specifies if timestamp is enabled in logs - timestampEnabled bool - // CallerEnabled specifies if caller is enabled in logs - callerEnabled bool -} - -type writer struct { - b *buffer -} - -func newWriter() *writer { - return &writer{b: &buffer{}} -} - -func (w *writer) Len() int { - return len(*w.b) -} - -func (w *writer) WriteString(s string) (int, error) { - return w.b.WriteString(s) -} - -func (w *writer) WriteByte(c byte) error { - return w.b.WriteByte(c) -} - -func (w *writer) Bytes() []byte { - return *w.b -} - -// NewDefaultTextFormatter creates a TextFormatter with -// the default options set. -func NewDefaultTextFormatter(enableColors bool) *TextFormatter { - return &TextFormatter{ - ComponentPadding: defaultComponentPadding, - FormatCaller: formatCallerWithPathAndLine, - ExtraFields: defaultFormatFields, - EnableColors: enableColors, - callerEnabled: true, - timestampEnabled: false, - } -} - -// CheckAndSetDefaults checks and sets log format configuration. -func (tf *TextFormatter) CheckAndSetDefaults() error { - // set padding - if tf.ComponentPadding == 0 { - tf.ComponentPadding = defaultComponentPadding - } - // set caller - tf.FormatCaller = formatCallerWithPathAndLine - - // set log formatting - if tf.ExtraFields == nil { - tf.timestampEnabled = true - tf.callerEnabled = true - tf.ExtraFields = defaultFormatFields - return nil - } - - if slices.Contains(tf.ExtraFields, TimestampField) { - tf.timestampEnabled = true - } - - if slices.Contains(tf.ExtraFields, CallerField) { - tf.callerEnabled = true - } - - return nil -} - -// Format formats each log line as configured in teleport config file. 
-func (tf *TextFormatter) Format(e *logrus.Entry) ([]byte, error) { - caller := tf.FormatCaller() - w := newWriter() - - // write timestamp first if enabled - if tf.timestampEnabled { - *w.b = appendRFC3339Millis(*w.b, e.Time.Round(0)) - } - - for _, field := range tf.ExtraFields { - switch field { - case LevelField: - var color int - var level string - switch e.Level { - case logrus.TraceLevel: - level = "TRACE" - color = gray - case logrus.DebugLevel: - level = "DEBUG" - color = gray - case logrus.InfoLevel: - level = "INFO" - color = blue - case logrus.WarnLevel: - level = "WARN" - color = yellow - case logrus.ErrorLevel: - level = "ERROR" - color = red - case logrus.FatalLevel: - level = "FATAL" - color = red - default: - color = blue - level = strings.ToUpper(e.Level.String()) - } - - if !tf.EnableColors { - color = noColor - } - - w.writeField(padMax(level, defaultLevelPadding), color) - case ComponentField: - padding := defaultComponentPadding - if tf.ComponentPadding != 0 { - padding = tf.ComponentPadding - } - if w.Len() > 0 { - w.WriteByte(' ') - } - component, ok := e.Data[teleport.ComponentKey].(string) - if ok && component != "" { - component = fmt.Sprintf("[%v]", component) - } - component = strings.ToUpper(padMax(component, padding)) - if component[len(component)-1] != ' ' { - component = component[:len(component)-1] + "]" - } - - w.WriteString(component) - default: - if _, ok := knownFormatFields[field]; !ok { - return nil, trace.BadParameter("invalid log format key: %v", field) - } - } - } - - // always use message - if e.Message != "" { - w.writeField(e.Message, noColor) - } - - if len(e.Data) > 0 { - w.writeMap(e.Data) - } - - // write caller last if enabled - if tf.callerEnabled && caller != "" { - w.writeField(caller, noColor) - } - - w.WriteByte('\n') - return w.Bytes(), nil -} - -// JSONFormatter implements the [logrus.Formatter] interface and adds extra -// fields to log entries. 
-type JSONFormatter struct { - logrus.JSONFormatter - - ExtraFields []string - // FormatCaller is a function to return (part) of source file path for output. - // Defaults to filePathAndLine() if unspecified - FormatCaller func() (caller string) - - callerEnabled bool - componentEnabled bool -} - -// CheckAndSetDefaults checks and sets log format configuration. -func (j *JSONFormatter) CheckAndSetDefaults() error { - // set log formatting - if j.ExtraFields == nil { - j.ExtraFields = defaultFormatFields - } - // set caller - j.FormatCaller = formatCallerWithPathAndLine - - if slices.Contains(j.ExtraFields, CallerField) { - j.callerEnabled = true - } - - if slices.Contains(j.ExtraFields, ComponentField) { - j.componentEnabled = true - } - - // rename default fields - j.JSONFormatter = logrus.JSONFormatter{ - FieldMap: logrus.FieldMap{ - logrus.FieldKeyTime: TimestampField, - logrus.FieldKeyLevel: LevelField, - logrus.FieldKeyMsg: messageField, - }, - DisableTimestamp: !slices.Contains(j.ExtraFields, TimestampField), - } - - return nil -} - -// Format formats each log line as configured in teleport config file. -func (j *JSONFormatter) Format(e *logrus.Entry) ([]byte, error) { - if j.callerEnabled { - path := j.FormatCaller() - e.Data[CallerField] = path - } - - if j.componentEnabled { - e.Data[ComponentField] = e.Data[teleport.ComponentKey] - } - - delete(e.Data, teleport.ComponentKey) - - return j.JSONFormatter.Format(e) -} - -// NewTestJSONFormatter creates a JSONFormatter that is -// configured for output in tests. 
-func NewTestJSONFormatter() *JSONFormatter { - formatter := &JSONFormatter{} - if err := formatter.CheckAndSetDefaults(); err != nil { - panic(err) - } - return formatter -} - -func (w *writer) writeError(value interface{}) { - switch err := value.(type) { - case trace.Error: - *w.b = fmt.Appendf(*w.b, "[%v]", err.DebugReport()) - default: - *w.b = fmt.Appendf(*w.b, "[%v]", value) - } -} - -func (w *writer) writeField(value interface{}, color int) { - if w.Len() > 0 { - w.WriteByte(' ') - } - w.writeValue(value, color) -} - -func (w *writer) writeKeyValue(key string, value interface{}) { - if w.Len() > 0 { - w.WriteByte(' ') - } - w.WriteString(key) - w.WriteByte(':') - if key == logrus.ErrorKey { - w.writeError(value) - return - } - w.writeValue(value, noColor) -} - -func (w *writer) writeValue(value interface{}, color int) { - if s, ok := value.(string); ok { - if color != noColor { - *w.b = fmt.Appendf(*w.b, "\u001B[%dm", color) - } - - if needsQuoting(s) { - *w.b = strconv.AppendQuote(*w.b, s) - } else { - *w.b = fmt.Append(*w.b, s) - } - - if color != noColor { - *w.b = fmt.Append(*w.b, "\u001B[0m") - } - return - } - - if color != noColor { - *w.b = fmt.Appendf(*w.b, "\x1b[%dm%v\x1b[0m", color, value) - return - } - - *w.b = fmt.Appendf(*w.b, "%v", value) -} - -func (w *writer) writeMap(m map[string]any) { - if len(m) == 0 { - return - } - keys := make([]string, 0, len(m)) - for key := range m { - keys = append(keys, key) - } - slices.Sort(keys) - for _, key := range keys { - if key == teleport.ComponentKey { - continue - } - switch value := m[key].(type) { - case map[string]any: - w.writeMap(value) - case logrus.Fields: - w.writeMap(value) - default: - w.writeKeyValue(key, value) - } - } -} - -type frameCursor struct { - // current specifies the current stack frame. 
- // if omitted, rest contains the complete stack - current *runtime.Frame - // rest specifies the rest of stack frames to explore - rest *runtime.Frames - // n specifies the total number of stack frames - n int -} - -// formatCallerWithPathAndLine formats the caller in the form path/segment: -// for output in the log -func formatCallerWithPathAndLine() (path string) { - if cursor := findFrame(); cursor != nil { - t := newTraceFromFrames(*cursor, nil) - return t.Loc() - } - return "" -} - -var frameIgnorePattern = regexp.MustCompile(`github\.com/sirupsen/logrus`) - -// findFrames positions the stack pointer to the first -// function that does not match the frameIngorePattern -// and returns the rest of the stack frames -func findFrame() *frameCursor { - var buf [32]uintptr - // Skip enough frames to start at user code. - // This number is a mere hint to the following loop - // to start as close to user code as possible and getting it right is not mandatory. - // The skip count might need to get updated if the call to findFrame is - // moved up/down the call stack - n := runtime.Callers(4, buf[:]) - pcs := buf[:n] - frames := runtime.CallersFrames(pcs) - for i := 0; i < n; i++ { - frame, _ := frames.Next() - if !frameIgnorePattern.MatchString(frame.Function) { - return &frameCursor{ - current: &frame, - rest: frames, - n: n, - } - } - } - return nil -} - -func newTraceFromFrames(cursor frameCursor, err error) *trace.TraceErr { - traces := make(trace.Traces, 0, cursor.n) - if cursor.current != nil { - traces = append(traces, frameToTrace(*cursor.current)) - } - for { - frame, more := cursor.rest.Next() - traces = append(traces, frameToTrace(frame)) - if !more { - break - } - } - return &trace.TraceErr{ - Err: err, - Traces: traces, - } -} - -func frameToTrace(frame runtime.Frame) trace.Trace { - return trace.Trace{ - Func: frame.Function, - Path: frame.File, - Line: frame.Line, - } -} diff --git a/lib/utils/log/slog.go b/lib/utils/log/slog.go index 
46f0e13627b3e..bfb34f4a94114 100644 --- a/lib/utils/log/slog.go +++ b/lib/utils/log/slog.go @@ -27,7 +27,6 @@ import ( "unicode" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" oteltrace "go.opentelemetry.io/otel/trace" ) @@ -68,25 +67,6 @@ var SupportedLevelsText = []string{ slog.LevelError.String(), } -// SlogLevelToLogrusLevel converts a [slog.Level] to its equivalent -// [logrus.Level]. -func SlogLevelToLogrusLevel(level slog.Level) logrus.Level { - switch level { - case TraceLevel: - return logrus.TraceLevel - case slog.LevelDebug: - return logrus.DebugLevel - case slog.LevelInfo: - return logrus.InfoLevel - case slog.LevelWarn: - return logrus.WarnLevel - case slog.LevelError: - return logrus.ErrorLevel - default: - return logrus.FatalLevel - } -} - // DiscardHandler is a [slog.Handler] that discards all messages. It // is more efficient than a [slog.Handler] which outputs to [io.Discard] since // it performs zero formatting. diff --git a/lib/utils/log/slog_text_handler.go b/lib/utils/log/slog_text_handler.go index 7f93a388977bb..612615ba8582d 100644 --- a/lib/utils/log/slog_text_handler.go +++ b/lib/utils/log/slog_text_handler.go @@ -150,45 +150,12 @@ func (s *SlogTextHandler) Handle(ctx context.Context, r slog.Record) error { // Processing fields in this manner allows users to // configure the level and component position in the output. - // This matches the behavior of the original logrus. All other + // This matches the behavior of the original logrus formatter. All other // fields location in the output message are static. 
for _, field := range s.cfg.ConfiguredFields { switch field { case LevelField: - var color int - var level string - switch r.Level { - case TraceLevel: - level = "TRACE" - color = gray - case slog.LevelDebug: - level = "DEBUG" - color = gray - case slog.LevelInfo: - level = "INFO" - color = blue - case slog.LevelWarn: - level = "WARN" - color = yellow - case slog.LevelError: - level = "ERROR" - color = red - case slog.LevelError + 1: - level = "FATAL" - color = red - default: - color = blue - level = r.Level.String() - } - - if !s.cfg.EnableColors { - color = noColor - } - - level = padMax(level, defaultLevelPadding) - if color != noColor { - level = fmt.Sprintf("\u001B[%dm%s\u001B[0m", color, level) - } + level := formatLevel(r.Level, s.cfg.EnableColors) if rep == nil { state.appendKey(slog.LevelKey) @@ -211,12 +178,8 @@ func (s *SlogTextHandler) Handle(ctx context.Context, r slog.Record) error { if attr.Key != teleport.ComponentKey { return true } - component = fmt.Sprintf("[%v]", attr.Value) - component = strings.ToUpper(padMax(component, s.cfg.Padding)) - if component[len(component)-1] != ' ' { - component = component[:len(component)-1] + "]" - } + component = formatComponent(attr.Value, s.cfg.Padding) return false }) @@ -271,6 +234,55 @@ func (s *SlogTextHandler) Handle(ctx context.Context, r slog.Record) error { return err } +func formatLevel(value slog.Level, enableColors bool) string { + var color int + var level string + switch value { + case TraceLevel: + level = "TRACE" + color = gray + case slog.LevelDebug: + level = "DEBUG" + color = gray + case slog.LevelInfo: + level = "INFO" + color = blue + case slog.LevelWarn: + level = "WARN" + color = yellow + case slog.LevelError: + level = "ERROR" + color = red + case slog.LevelError + 1: + level = "FATAL" + color = red + default: + color = blue + level = value.String() + } + + if !enableColors { + color = noColor + } + + level = padMax(level, defaultLevelPadding) + if color != noColor { + level = 
fmt.Sprintf("\u001B[%dm%s\u001B[0m", color, level) + } + + return level +} + +func formatComponent(value slog.Value, padding int) string { + component := fmt.Sprintf("[%v]", value) + component = strings.ToUpper(padMax(component, padding)) + if component[len(component)-1] != ' ' { + component = component[:len(component)-1] + "]" + } + + return component +} + func (s *SlogTextHandler) clone() *SlogTextHandler { // We can't use assignment because we can't copy the mutex. return &SlogTextHandler{ diff --git a/lib/utils/log/writer.go b/lib/utils/log/writer.go deleted file mode 100644 index 77cf3037a8b66..0000000000000 --- a/lib/utils/log/writer.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package log - -import ( - "io" - "sync" -) - -// SharedWriter is an [io.Writer] implementation that protects -// writes with a mutex. This allows a single [io.Writer] to be shared -// by both logrus and slog without their output clobbering each other. -type SharedWriter struct { - mu sync.Mutex - io.Writer -} - -func (s *SharedWriter) Write(p []byte) (int, error) { - s.mu.Lock() - defer s.mu.Unlock() - - return s.Writer.Write(p) -} - -// NewSharedWriter wraps the provided [io.Writer] in a writer that -// is thread safe. 
-func NewSharedWriter(w io.Writer) *SharedWriter { - return &SharedWriter{Writer: w} -} From f63a099ca797ac6c82b86190504c8e2fbe795ef4 Mon Sep 17 00:00:00 2001 From: Paul Gottschling Date: Fri, 10 Jan 2025 13:01:35 -0500 Subject: [PATCH 43/45] Add Access Monitoring compatibility docs warning (#50571) Closes #48745 Add a warning to the External Audit Storage page that this feature is not compatible with Access Monitoring on Teleport Enterprise (Cloud), complementing the warning on the Access Monitoring page. --- .../admin-guides/access-controls/access-monitoring.mdx | 2 +- .../admin-guides/management/external-audit-storage.mdx | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/pages/admin-guides/access-controls/access-monitoring.mdx b/docs/pages/admin-guides/access-controls/access-monitoring.mdx index 7f5a7b2a0a864..25797cf3e89d3 100644 --- a/docs/pages/admin-guides/access-controls/access-monitoring.mdx +++ b/docs/pages/admin-guides/access-controls/access-monitoring.mdx @@ -17,7 +17,7 @@ Users are able to write their own custom access monitoring queries by querying t Access Monitoring is not currently supported with External Audit Storage - in Teleport Enterprise (cloud-hosted). This functionality will be + in Teleport Enterprise (Cloud). This functionality will be enabled in a future Teleport release. diff --git a/docs/pages/admin-guides/management/external-audit-storage.mdx b/docs/pages/admin-guides/management/external-audit-storage.mdx index 6aa2fcc0368b8..587bb7ffebe56 100644 --- a/docs/pages/admin-guides/management/external-audit-storage.mdx +++ b/docs/pages/admin-guides/management/external-audit-storage.mdx @@ -21,6 +21,12 @@ External Audit Storage is based on Teleport's available on Teleport Enterprise Cloud clusters running Teleport v14.2.1 or above. + +On Teleport Enterprise (Cloud), External Audit +Storage is not currently supported for users who have Access Monitoring enabled. 
+This functionality will be enabled in a future Teleport release. + + ## Prerequisites 1. A Teleport Enterprise Cloud account. If you do not have one, [sign From 84956a8c8fc39e0c8004c59332b6046b1dc63063 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Fri, 10 Jan 2025 13:48:39 -0500 Subject: [PATCH 44/45] Migrate eks discovery to aws sdk v2 (#50603) * Remove all references to EKS sdk v1. * Address PR comments. --- lib/cloud/awsconfig/awsconfig.go | 8 +- lib/cloud/clients.go | 23 -- lib/cloud/mocks/aws.go | 85 ---- lib/cloud/mocks/aws_config.go | 4 +- lib/cloud/mocks/aws_sts.go | 6 + .../awsoidc/eks_enroll_clusters.go | 19 +- lib/kube/proxy/cluster_details.go | 75 ++-- lib/kube/proxy/kube_creds_test.go | 100 ++++- lib/kube/proxy/server.go | 23 ++ lib/kube/proxy/watcher.go | 1 + lib/kube/utils/eks_token_signed.go | 46 ++- lib/srv/db/cloud/iam_test.go | 2 + lib/srv/db/common/auth_test.go | 6 +- lib/srv/discovery/access_graph.go | 1 + lib/srv/discovery/common/kubernetes.go | 9 +- lib/srv/discovery/common/kubernetes_test.go | 16 +- lib/srv/discovery/common/renaming_test.go | 10 +- lib/srv/discovery/discovery.go | 49 ++- lib/srv/discovery/discovery_test.go | 362 ++++++++++-------- .../discovery/fetchers/aws-sync/aws-sync.go | 2 + lib/srv/discovery/fetchers/aws-sync/eks.go | 185 +++++---- .../discovery/fetchers/aws-sync/eks_test.go | 145 +++++-- lib/srv/discovery/fetchers/eks.go | 249 ++++++------ lib/srv/discovery/fetchers/eks_test.go | 142 ++++--- .../kube_integration_watcher_test.go | 100 +++-- 25 files changed, 966 insertions(+), 702 deletions(-) diff --git a/lib/cloud/awsconfig/awsconfig.go b/lib/cloud/awsconfig/awsconfig.go index 7b1cabe5ffe75..245fe8a9a6b23 100644 --- a/lib/cloud/awsconfig/awsconfig.go +++ b/lib/cloud/awsconfig/awsconfig.go @@ -280,11 +280,11 @@ func getBaseConfig(ctx context.Context, region string, opts *options) (aws.Confi } func getConfigForRoleChain(ctx context.Context, cfg aws.Config, roles []AssumeRole, newCltFn 
STSClientProviderFunc) (aws.Config, error) { - for _, r := range roles { - cfg.Credentials = getAssumeRoleProvider(ctx, newCltFn(cfg), r) - } if len(roles) > 0 { - // no point caching every assumed role in the chain, we can just cache + for _, r := range roles { + cfg.Credentials = getAssumeRoleProvider(ctx, newCltFn(cfg), r) + } + // No point caching every assumed role in the chain, we can just cache // the last one. cfg.Credentials = aws.NewCredentialsCache(cfg.Credentials, awsCredentialsCacheOptions) if _, err := cfg.Credentials.Retrieve(ctx); err != nil { diff --git a/lib/cloud/clients.go b/lib/cloud/clients.go index 99c2deb4001f0..28e8ebabac598 100644 --- a/lib/cloud/clients.go +++ b/lib/cloud/clients.go @@ -39,8 +39,6 @@ import ( "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" awssession "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" "github.com/aws/aws-sdk-go/service/iam" @@ -127,8 +125,6 @@ type AWSClients interface { GetAWSIAMClient(ctx context.Context, region string, opts ...AWSOptionsFn) (iamiface.IAMAPI, error) // GetAWSSTSClient returns AWS STS client for the specified region. GetAWSSTSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (stsiface.STSAPI, error) - // GetAWSEKSClient returns AWS EKS client for the specified region. - GetAWSEKSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (eksiface.EKSAPI, error) // GetAWSKMSClient returns AWS KMS client for the specified region. GetAWSKMSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (kmsiface.KMSAPI, error) // GetAWSS3Client returns AWS S3 client. 
@@ -585,15 +581,6 @@ func (c *cloudClients) GetAWSSTSClient(ctx context.Context, region string, opts return sts.New(session), nil } -// GetAWSEKSClient returns AWS EKS client for the specified region. -func (c *cloudClients) GetAWSEKSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (eksiface.EKSAPI, error) { - session, err := c.GetAWSSession(ctx, region, opts...) - if err != nil { - return nil, trace.Wrap(err) - } - return eks.New(session), nil -} - // GetAWSKMSClient returns AWS KMS client for the specified region. func (c *cloudClients) GetAWSKMSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (kmsiface.KMSAPI, error) { session, err := c.GetAWSSession(ctx, region, opts...) @@ -1032,7 +1019,6 @@ type TestCloudClients struct { GCPProjects gcp.ProjectsClient GCPInstances gcp.InstancesClient InstanceMetadata imds.Client - EKS eksiface.EKSAPI KMS kmsiface.KMSAPI S3 s3iface.S3API AzureMySQL azure.DBServersClient @@ -1173,15 +1159,6 @@ func (c *TestCloudClients) GetAWSSTSClient(ctx context.Context, region string, o return c.STS, nil } -// GetAWSEKSClient returns AWS EKS client for the specified region. -func (c *TestCloudClients) GetAWSEKSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (eksiface.EKSAPI, error) { - _, err := c.GetAWSSession(ctx, region, opts...) - if err != nil { - return nil, trace.Wrap(err) - } - return c.EKS, nil -} - // GetAWSKMSClient returns AWS KMS client for the specified region. func (c *TestCloudClients) GetAWSKMSClient(ctx context.Context, region string, opts ...AWSOptionsFn) (kmsiface.KMSAPI, error) { _, err := c.GetAWSSession(ctx, region, opts...) 
diff --git a/lib/cloud/mocks/aws.go b/lib/cloud/mocks/aws.go index ceb50bd822cc2..9ba40628e3a92 100644 --- a/lib/cloud/mocks/aws.go +++ b/lib/cloud/mocks/aws.go @@ -28,8 +28,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/aws/aws-sdk-go/service/sts" @@ -288,86 +286,3 @@ func (m *IAMErrorMock) PutUserPolicyWithContext(ctx aws.Context, input *iam.PutU } return nil, trace.AccessDenied("unauthorized") } - -// EKSMock is a mock EKS client. -type EKSMock struct { - eksiface.EKSAPI - Clusters []*eks.Cluster - AccessEntries []*eks.AccessEntry - AssociatedPolicies []*eks.AssociatedAccessPolicy - Notify chan struct{} -} - -func (e *EKSMock) DescribeClusterWithContext(_ aws.Context, req *eks.DescribeClusterInput, _ ...request.Option) (*eks.DescribeClusterOutput, error) { - defer func() { - if e.Notify != nil { - e.Notify <- struct{}{} - } - }() - for _, cluster := range e.Clusters { - if aws.StringValue(req.Name) == aws.StringValue(cluster.Name) { - return &eks.DescribeClusterOutput{Cluster: cluster}, nil - } - } - return nil, trace.NotFound("cluster %v not found", aws.StringValue(req.Name)) -} - -func (e *EKSMock) ListClustersPagesWithContext(_ aws.Context, _ *eks.ListClustersInput, f func(*eks.ListClustersOutput, bool) bool, _ ...request.Option) error { - defer func() { - if e.Notify != nil { - e.Notify <- struct{}{} - } - }() - clusters := make([]*string, 0, len(e.Clusters)) - for _, cluster := range e.Clusters { - clusters = append(clusters, cluster.Name) - } - f(&eks.ListClustersOutput{ - Clusters: clusters, - }, true) - return nil -} - -func (e *EKSMock) ListAccessEntriesPagesWithContext(_ aws.Context, _ *eks.ListAccessEntriesInput, f func(*eks.ListAccessEntriesOutput, bool) bool, _ ...request.Option) error { - defer func() { - if e.Notify != 
nil { - e.Notify <- struct{}{} - } - }() - accessEntries := make([]*string, 0, len(e.Clusters)) - for _, a := range e.AccessEntries { - accessEntries = append(accessEntries, a.PrincipalArn) - } - f(&eks.ListAccessEntriesOutput{ - AccessEntries: accessEntries, - }, true) - return nil -} - -func (e *EKSMock) DescribeAccessEntryWithContext(_ aws.Context, req *eks.DescribeAccessEntryInput, _ ...request.Option) (*eks.DescribeAccessEntryOutput, error) { - defer func() { - if e.Notify != nil { - e.Notify <- struct{}{} - } - }() - for _, a := range e.AccessEntries { - if aws.StringValue(req.PrincipalArn) == aws.StringValue(a.PrincipalArn) && aws.StringValue(a.ClusterName) == aws.StringValue(req.ClusterName) { - return &eks.DescribeAccessEntryOutput{AccessEntry: a}, nil - } - } - return nil, trace.NotFound("access entry %v not found", aws.StringValue(req.PrincipalArn)) -} - -func (e *EKSMock) ListAssociatedAccessPoliciesPagesWithContext(_ aws.Context, _ *eks.ListAssociatedAccessPoliciesInput, f func(*eks.ListAssociatedAccessPoliciesOutput, bool) bool, _ ...request.Option) error { - defer func() { - if e.Notify != nil { - e.Notify <- struct{}{} - } - }() - - f(&eks.ListAssociatedAccessPoliciesOutput{ - AssociatedAccessPolicies: e.AssociatedPolicies, - }, true) - return nil - -} diff --git a/lib/cloud/mocks/aws_config.go b/lib/cloud/mocks/aws_config.go index b52dfbd36d74a..819d6ca8f535e 100644 --- a/lib/cloud/mocks/aws_config.go +++ b/lib/cloud/mocks/aws_config.go @@ -38,12 +38,12 @@ func (f *AWSConfigProvider) GetConfig(ctx context.Context, region string, optFns if stsClt == nil { stsClt = &STSClient{} } - optFns = append(optFns, + optFns = append([]awsconfig.OptionsFn{ awsconfig.WithOIDCIntegrationClient(f.OIDCIntegrationClient), awsconfig.WithSTSClientProvider( newAssumeRoleClientProviderFunc(stsClt), ), - ) + }, optFns...) return awsconfig.GetConfig(ctx, region, optFns...) 
} diff --git a/lib/cloud/mocks/aws_sts.go b/lib/cloud/mocks/aws_sts.go index 178a1259669a4..cf117788e696f 100644 --- a/lib/cloud/mocks/aws_sts.go +++ b/lib/cloud/mocks/aws_sts.go @@ -54,6 +54,12 @@ type STSClient struct { recordFn func(roleARN, externalID string) } +func (m *STSClient) GetCallerIdentity(ctx context.Context, params *sts.GetCallerIdentityInput, optFns ...func(*sts.Options)) (*sts.GetCallerIdentityOutput, error) { + return &sts.GetCallerIdentityOutput{ + Arn: aws.String(m.ARN), + }, nil +} + func (m *STSClient) AssumeRoleWithWebIdentity(ctx context.Context, in *sts.AssumeRoleWithWebIdentityInput, _ ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) { m.record(aws.ToString(in.RoleArn), "") expiry := time.Now().Add(60 * time.Minute) diff --git a/lib/integrations/awsoidc/eks_enroll_clusters.go b/lib/integrations/awsoidc/eks_enroll_clusters.go index dbeb6f2385484..d61b062cccfdb 100644 --- a/lib/integrations/awsoidc/eks_enroll_clusters.go +++ b/lib/integrations/awsoidc/eks_enroll_clusters.go @@ -74,8 +74,10 @@ const ( concurrentEKSEnrollingLimit = 5 ) -var agentRepoURL = url.URL{Scheme: "https", Host: "charts.releases.teleport.dev"} -var agentStagingRepoURL = url.URL{Scheme: "https", Host: "charts.releases.development.teleport.dev"} +var ( + agentRepoURL = url.URL{Scheme: "https", Host: "charts.releases.teleport.dev"} + agentStagingRepoURL = url.URL{Scheme: "https", Host: "charts.releases.development.teleport.dev"} +) // EnrollEKSClusterResult contains result for a single EKS cluster enrollment, if it was successful 'Error' will be nil // otherwise it will contain an error happened during enrollment. 
@@ -462,7 +464,6 @@ func enrollEKSCluster(ctx context.Context, log *slog.Logger, clock clockwork.Clo return "", issueTypeFromCheckAgentInstalledError(err), trace.Wrap(err, "could not check if teleport-kube-agent is already installed.") - } else if alreadyInstalled { return "", // When using EKS Auto Discovery, after the Kube Agent connects to the Teleport cluster, it is ignored in next discovery iterations. @@ -708,7 +709,8 @@ func installKubeAgent(ctx context.Context, cfg installKubeAgentParams) error { if cfg.req.IsCloud && cfg.req.EnableAutoUpgrades { vals["updater"] = map[string]any{"enabled": true, "releaseChannel": "stable/cloud"} - vals["highAvailability"] = map[string]any{"replicaCount": 2, + vals["highAvailability"] = map[string]any{ + "replicaCount": 2, "podDisruptionBudget": map[string]any{"enabled": true, "minAvailable": 1}, } } @@ -716,11 +718,10 @@ func installKubeAgent(ctx context.Context, cfg installKubeAgentParams) error { vals["enterprise"] = true } - eksTags := make(map[string]*string, len(cfg.eksCluster.Tags)) - for k, v := range cfg.eksCluster.Tags { - eksTags[k] = aws.String(v) - } - eksTags[types.OriginLabel] = aws.String(types.OriginCloud) + eksTags := make(map[string]string, len(cfg.eksCluster.Tags)) + maps.Copy(eksTags, cfg.eksCluster.Tags) + eksTags[types.OriginLabel] = types.OriginCloud + kubeCluster, err := common.NewKubeClusterFromAWSEKS(aws.ToString(cfg.eksCluster.Name), aws.ToString(cfg.eksCluster.Arn), eksTags) if err != nil { return trace.Wrap(err) diff --git a/lib/kube/proxy/cluster_details.go b/lib/kube/proxy/cluster_details.go index 1a66ce0562978..e1dbc45fca281 100644 --- a/lib/kube/proxy/cluster_details.go +++ b/lib/kube/proxy/cluster_details.go @@ -26,8 +26,8 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" 
"k8s.io/apimachinery/pkg/runtime/schema" @@ -39,6 +39,7 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils/retryutils" "github.com/gravitational/teleport/lib/cloud" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/cloud/azure" "github.com/gravitational/teleport/lib/cloud/gcp" kubeutils "github.com/gravitational/teleport/lib/kube/utils" @@ -50,6 +51,7 @@ import ( // kubeDetails contain the cluster-related details including authentication. type kubeDetails struct { kubeCreds + // dynamicLabels is the dynamic labels executor for this cluster. dynamicLabels *labels.Dynamic // kubeCluster is the dynamic kube_cluster or a static generated from kubeconfig and that only has the name populated. @@ -86,6 +88,8 @@ type kubeDetails struct { type clusterDetailsConfig struct { // cloudClients is the cloud clients to use for dynamic clusters. cloudClients cloud.Clients + // awsCloudClients provides AWS SDK clients. + awsCloudClients AWSClientGetter // kubeCreds is the credentials to use for the cluster. kubeCreds kubeCreds // cluster is the cluster to create a proxied cluster for. @@ -103,8 +107,10 @@ type clusterDetailsConfig struct { component KubeServiceType } -const defaultRefreshPeriod = 5 * time.Minute -const backoffRefreshStep = 10 * time.Second +const ( + defaultRefreshPeriod = 5 * time.Minute + backoffRefreshStep = 10 * time.Second +) // newClusterDetails creates a proxied kubeDetails structure given a dynamic cluster. func newClusterDetails(ctx context.Context, cfg clusterDetailsConfig) (_ *kubeDetails, err error) { @@ -263,14 +269,20 @@ func (k *kubeDetails) getObjectGVK(resource apiResource) *schema.GroupVersionKin // getKubeClusterCredentials generates kube credentials for dynamic clusters. 
func getKubeClusterCredentials(ctx context.Context, cfg clusterDetailsConfig) (kubeCreds, error) { - dynCredsCfg := dynamicCredsConfig{kubeCluster: cfg.cluster, log: cfg.log, checker: cfg.checker, resourceMatchers: cfg.resourceMatchers, clock: cfg.clock, component: cfg.component} - switch { + switch dynCredsCfg := (dynamicCredsConfig{ + kubeCluster: cfg.cluster, + log: cfg.log, + checker: cfg.checker, + resourceMatchers: cfg.resourceMatchers, + clock: cfg.clock, + component: cfg.component, + }); { case cfg.cluster.IsKubeconfig(): return getStaticCredentialsFromKubeconfig(ctx, cfg.component, cfg.cluster, cfg.log, cfg.checker) case cfg.cluster.IsAzure(): return getAzureCredentials(ctx, cfg.cloudClients, dynCredsCfg) case cfg.cluster.IsAWS(): - return getAWSCredentials(ctx, cfg.cloudClients, dynCredsCfg) + return getAWSCredentials(ctx, cfg.awsCloudClients, dynCredsCfg) case cfg.cluster.IsGCP(): return getGCPCredentials(ctx, cfg.cloudClients, dynCredsCfg) default: @@ -308,7 +320,7 @@ func azureRestConfigClient(cloudClients cloud.Clients) dynamicCredsClient { } // getAWSCredentials creates a dynamicKubeCreds that generates and updates the access credentials to a EKS kubernetes cluster. -func getAWSCredentials(ctx context.Context, cloudClients cloud.Clients, cfg dynamicCredsConfig) (*dynamicKubeCreds, error) { +func getAWSCredentials(ctx context.Context, cloudClients AWSClientGetter, cfg dynamicCredsConfig) (*dynamicKubeCreds, error) { // create a client that returns the credentials for kubeCluster cfg.client = getAWSClientRestConfig(cloudClients, cfg.clock, cfg.resourceMatchers) creds, err := newDynamicKubeCreds(ctx, cfg) @@ -328,51 +340,66 @@ func getAWSResourceMatcherToCluster(kubeCluster types.KubeCluster, resourceMatch if match, _, _ := services.MatchLabels(matcher.Labels, kubeCluster.GetAllLabels()); !match { continue } - - return &(matcher.AWS) + return &matcher.AWS } return nil } +// STSPresignClient is the subset of the STS presign interface we use in fetchers. 
+type STSPresignClient = kubeutils.STSPresignClient + +// EKSClient is the subset of the EKS Client interface we use. +type EKSClient interface { + eks.DescribeClusterAPIClient +} + +// AWSClientGetter is an interface for getting an EKS client and an STS client. +type AWSClientGetter interface { + awsconfig.Provider + // GetAWSEKSClient returns AWS EKS client for the specified config. + GetAWSEKSClient(aws.Config) EKSClient + // GetAWSSTSPresignClient returns AWS STS presign client for the specified config. + GetAWSSTSPresignClient(aws.Config) STSPresignClient +} + // getAWSClientRestConfig creates a dynamicCredsClient that generates returns credentials to EKS clusters. -func getAWSClientRestConfig(cloudClients cloud.Clients, clock clockwork.Clock, resourceMatchers []services.ResourceMatcher) dynamicCredsClient { +func getAWSClientRestConfig(cloudClients AWSClientGetter, clock clockwork.Clock, resourceMatchers []services.ResourceMatcher) dynamicCredsClient { return func(ctx context.Context, cluster types.KubeCluster) (*rest.Config, time.Time, error) { region := cluster.GetAWSConfig().Region - opts := []cloud.AWSOptionsFn{ - cloud.WithAmbientCredentials(), - cloud.WithoutSessionCache(), + opts := []awsconfig.OptionsFn{ + awsconfig.WithAmbientCredentials(), } if awsAssume := getAWSResourceMatcherToCluster(cluster, resourceMatchers); awsAssume != nil { - opts = append(opts, cloud.WithAssumeRole(awsAssume.AssumeRoleARN, awsAssume.ExternalID)) + opts = append(opts, awsconfig.WithAssumeRole(awsAssume.AssumeRoleARN, awsAssume.ExternalID)) } - regionalClient, err := cloudClients.GetAWSEKSClient(ctx, region, opts...) + + cfg, err := cloudClients.GetConfig(ctx, region, opts...) 
if err != nil { return nil, time.Time{}, trace.Wrap(err) } - eksCfg, err := regionalClient.DescribeClusterWithContext(ctx, &eks.DescribeClusterInput{ + regionalClient := cloudClients.GetAWSEKSClient(cfg) + + eksCfg, err := regionalClient.DescribeCluster(ctx, &eks.DescribeClusterInput{ Name: aws.String(cluster.GetAWSConfig().Name), }) if err != nil { return nil, time.Time{}, trace.Wrap(err) } - ca, err := base64.StdEncoding.DecodeString(aws.StringValue(eksCfg.Cluster.CertificateAuthority.Data)) + ca, err := base64.StdEncoding.DecodeString(aws.ToString(eksCfg.Cluster.CertificateAuthority.Data)) if err != nil { return nil, time.Time{}, trace.Wrap(err) } - apiEndpoint := aws.StringValue(eksCfg.Cluster.Endpoint) + apiEndpoint := aws.ToString(eksCfg.Cluster.Endpoint) if len(apiEndpoint) == 0 { return nil, time.Time{}, trace.BadParameter("invalid api endpoint for cluster %q", cluster.GetAWSConfig().Name) } - stsClient, err := cloudClients.GetAWSSTSClient(ctx, region, opts...) - if err != nil { - return nil, time.Time{}, trace.Wrap(err) - } + stsPresignClient := cloudClients.GetAWSSTSPresignClient(cfg) - token, exp, err := kubeutils.GenAWSEKSToken(stsClient, cluster.GetAWSConfig().Name, clock) + token, exp, err := kubeutils.GenAWSEKSToken(ctx, stsPresignClient, cluster.GetAWSConfig().Name, clock) if err != nil { return nil, time.Time{}, trace.Wrap(err) } diff --git a/lib/kube/proxy/kube_creds_test.go b/lib/kube/proxy/kube_creds_test.go index ca4f1bd4b58e0..ca2f537e6de05 100644 --- a/lib/kube/proxy/kube_creds_test.go +++ b/lib/kube/proxy/kube_creds_test.go @@ -26,8 +26,11 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" 
"github.com/stretchr/testify/require" @@ -41,10 +44,65 @@ import ( "github.com/gravitational/teleport/lib/cloud/gcp" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/fixtures" + kubeutils "github.com/gravitational/teleport/lib/kube/utils" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/utils" ) +type mockEKSClientGetter struct { + mocks.AWSConfigProvider + stsPresignClient *mockSTSPresignAPI + eksClient *mockEKSAPI +} + +func (e *mockEKSClientGetter) GetAWSEKSClient(aws.Config) EKSClient { + return e.eksClient +} + +func (e *mockEKSClientGetter) GetAWSSTSPresignClient(aws.Config) kubeutils.STSPresignClient { + return e.stsPresignClient +} + +type mockSTSPresignAPI struct { + url *url.URL +} + +func (a *mockSTSPresignAPI) PresignGetCallerIdentity(ctx context.Context, params *sts.GetCallerIdentityInput, optFns ...func(*sts.PresignOptions)) (*v4.PresignedHTTPRequest, error) { + return &v4.PresignedHTTPRequest{URL: a.url.String()}, nil +} + +type mockEKSAPI struct { + EKSClient + + notify chan struct{} + clusters []*ekstypes.Cluster +} + +func (m *mockEKSAPI) ListClusters(ctx context.Context, req *eks.ListClustersInput, _ ...func(*eks.Options)) (*eks.ListClustersOutput, error) { + defer func() { m.notify <- struct{}{} }() + + var names []string + for _, cluster := range m.clusters { + names = append(names, aws.ToString(cluster.Name)) + } + return &eks.ListClustersOutput{ + Clusters: names, + }, nil +} + +func (m *mockEKSAPI) DescribeCluster(_ context.Context, req *eks.DescribeClusterInput, _ ...func(*eks.Options)) (*eks.DescribeClusterOutput, error) { + defer func() { m.notify <- struct{}{} }() + + for _, cluster := range m.clusters { + if aws.ToString(cluster.Name) == aws.ToString(req.Name) { + return &eks.DescribeClusterOutput{ + Cluster: cluster, + }, nil + } + } + return nil, trace.NotFound("cluster %q not found", aws.ToString(req.Name)) +} + // Test_DynamicKubeCreds tests the dynamic 
kube credrentials generator for // AWS, GCP, and Azure clusters accessed using their respective IAM credentials. // This test mocks the cloud provider clients and the STS client to generate @@ -99,32 +157,37 @@ func Test_DynamicKubeCreds(t *testing.T) { ) require.NoError(t, err) - // mock sts client + // Mock sts client. u := &url.URL{ Scheme: "https", Host: "sts.amazonaws.com", Path: "/?Action=GetCallerIdentity&Version=2011-06-15", } - sts := &mocks.STSClientV1{ - // u is used to presign the request - // here we just verify the pre-signed request includes this url. - URL: u, - } - // mock clients - cloudclients := &cloud.TestCloudClients{ - STS: sts, - EKS: &mocks.EKSMock{ - Notify: notify, - Clusters: []*eks.Cluster{ + // EKS clients. + eksClients := &mockEKSClientGetter{ + AWSConfigProvider: mocks.AWSConfigProvider{ + STSClient: &mocks.STSClient{}, + }, + stsPresignClient: &mockSTSPresignAPI{ + // u is used to presign the request + // here we just verify the pre-signed request includes this url. + url: u, + }, + eksClient: &mockEKSAPI{ + notify: notify, + clusters: []*ekstypes.Cluster{ { Endpoint: aws.String("https://api.eks.us-west-2.amazonaws.com"), Name: aws.String(awsKube.GetAWSConfig().Name), - CertificateAuthority: &eks.Certificate{ + CertificateAuthority: &ekstypes.Certificate{ Data: aws.String(base64.RawStdEncoding.EncodeToString([]byte(fixtures.TLSCACertPEM))), }, }, }, }, + } + // Mock clients. 
+ cloudclients := &cloud.TestCloudClients{ GCPGKE: &mocks.GKEMock{ Notify: notify, Clock: fakeClock, @@ -204,7 +267,7 @@ func Test_DynamicKubeCreds(t *testing.T) { name: "aws eks cluster without assume role", args: args{ cluster: awsKube, - client: getAWSClientRestConfig(cloudclients, fakeClock, nil), + client: getAWSClientRestConfig(eksClients, fakeClock, nil), validateBearerToken: validateEKSToken, }, wantAddr: "api.eks.us-west-2.amazonaws.com:443", @@ -213,7 +276,7 @@ func Test_DynamicKubeCreds(t *testing.T) { name: "aws eks cluster with unmatched assume role", args: args{ cluster: awsKube, - client: getAWSClientRestConfig(cloudclients, fakeClock, []services.ResourceMatcher{ + client: getAWSClientRestConfig(eksClients, fakeClock, []services.ResourceMatcher{ { Labels: types.Labels{ "rand": []string{"value"}, @@ -233,7 +296,7 @@ func Test_DynamicKubeCreds(t *testing.T) { args: args{ cluster: awsKube, client: getAWSClientRestConfig( - cloudclients, + eksClients, fakeClock, []services.ResourceMatcher{ { @@ -331,6 +394,7 @@ func Test_DynamicKubeCreds(t *testing.T) { } require.NoError(t, got.close()) + sts := eksClients.AWSConfigProvider.STSClient require.Equal(t, tt.wantAssumedRole, apiutils.Deduplicate(sts.GetAssumedRoleARNs())) require.Equal(t, tt.wantExternalIds, apiutils.Deduplicate(sts.GetAssumedRoleExternalIDs())) sts.ResetAssumeRoleHistory() diff --git a/lib/kube/proxy/server.go b/lib/kube/proxy/server.go index 6ac466746b51f..f153039d60749 100644 --- a/lib/kube/proxy/server.go +++ b/lib/kube/proxy/server.go @@ -28,6 +28,9 @@ import ( "sync" "time" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/gravitational/trace" "golang.org/x/net/http2" @@ -38,6 +41,7 @@ import ( "github.com/gravitational/teleport/lib/auth/authclient" "github.com/gravitational/teleport/lib/authz" "github.com/gravitational/teleport/lib/cloud" + "github.com/gravitational/teleport/lib/cloud/awsconfig" 
"github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/inventory" "github.com/gravitational/teleport/lib/labels" @@ -74,6 +78,7 @@ type TLSServerConfig struct { OnReconcile func(types.KubeClusters) // CloudClients is a set of cloud clients that Teleport supports. CloudClients cloud.Clients + awsClients *awsClientsGetter // StaticLabels is a map of static labels associated with this service. // Each cluster advertised by this kubernetes_service will include these static labels. // If the service and a cluster define labels with the same key, @@ -106,6 +111,21 @@ type TLSServerConfig struct { InventoryHandle inventory.DownstreamHandle } +type awsClientsGetter struct{} + +func (f *awsClientsGetter) GetConfig(ctx context.Context, region string, optFns ...awsconfig.OptionsFn) (aws.Config, error) { + return awsconfig.GetConfig(ctx, region, optFns...) +} + +func (f *awsClientsGetter) GetAWSEKSClient(cfg aws.Config) EKSClient { + return eks.NewFromConfig(cfg) +} + +func (f *awsClientsGetter) GetAWSSTSPresignClient(cfg aws.Config) STSPresignClient { + stsClient := sts.NewFromConfig(cfg) + return sts.NewPresignClient(stsClient) +} + // CheckAndSetDefaults checks and sets default values func (c *TLSServerConfig) CheckAndSetDefaults() error { if err := c.ForwarderConfig.CheckAndSetDefaults(); err != nil { @@ -142,6 +162,9 @@ func (c *TLSServerConfig) CheckAndSetDefaults() error { } c.CloudClients = cloudClients } + if c.awsClients == nil { + c.awsClients = &awsClientsGetter{} + } if c.ConnectedProxyGetter == nil { c.ConnectedProxyGetter = reversetunnel.NewConnectedProxyGetter() } diff --git a/lib/kube/proxy/watcher.go b/lib/kube/proxy/watcher.go index 56bea639d5260..fd83ddfd1ad60 100644 --- a/lib/kube/proxy/watcher.go +++ b/lib/kube/proxy/watcher.go @@ -174,6 +174,7 @@ func (m *monitoredKubeClusters) get() map[string]types.KubeCluster { func (s *TLSServer) buildClusterDetailsConfigForCluster(cluster types.KubeCluster) clusterDetailsConfig { 
return clusterDetailsConfig{ cloudClients: s.CloudClients, + awsCloudClients: s.awsClients, cluster: cluster, log: s.log, checker: s.CheckImpersonationPermissions, diff --git a/lib/kube/utils/eks_token_signed.go b/lib/kube/utils/eks_token_signed.go index 4431cf93dad79..1a1840af888ef 100644 --- a/lib/kube/utils/eks_token_signed.go +++ b/lib/kube/utils/eks_token_signed.go @@ -19,44 +19,64 @@ package utils import ( + "context" "encoding/base64" "time" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" ) +// STSPresignClient is the subset of the STS presign client we need to generate EKS tokens. +type STSPresignClient interface { + PresignGetCallerIdentity(ctx context.Context, params *sts.GetCallerIdentityInput, optFns ...func(*sts.PresignOptions)) (*v4.PresignedHTTPRequest, error) +} + // GenAWSEKSToken creates an AWS token to access EKS clusters. // Logic from https://github.com/aws/aws-cli/blob/6c0d168f0b44136fc6175c57c090d4b115437ad1/awscli/customizations/eks/get_token.py#L211-L229 -func GenAWSEKSToken(stsClient stsiface.STSAPI, clusterID string, clock clockwork.Clock) (string, time.Time, error) { +// TODO(@creack): Consolidate with https://github.com/gravitational/teleport/blob/d37da511c944825a47155421bf278777238eecc0/lib/integrations/awsoidc/eks_enroll_clusters.go#L341-L372 +func GenAWSEKSToken(ctx context.Context, stsClient STSPresignClient, clusterID string, clock clockwork.Clock) (string, time.Time, error) { const ( - // The sts GetCallerIdentity request is valid for 15 minutes regardless of this parameters value after it has been - // signed. 
- requestPresignParam = 60 // The actual token expiration (presigned STS urls are valid for 15 minutes after timestamp in x-amz-date). + expireHeader = "X-Amz-Expires" + expireValue = "60" presignedURLExpiration = 15 * time.Minute v1Prefix = "k8s-aws-v1." clusterIDHeader = "x-k8s-aws-id" ) - // generate an sts:GetCallerIdentity request and add our custom cluster ID header - request, _ := stsClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) - request.HTTPRequest.Header.Add(clusterIDHeader, clusterID) - // Sign the request. The expires parameter (sets the x-amz-expires header) is // currently ignored by STS, and the token expires 15 minutes after the x-amz-date // timestamp regardless. We set it to 60 seconds for backwards compatibility (the // parameter is a required argument to Presign(), and authenticators 0.3.0 and older are expecting a value between // 0 and 60 on the server side). // https://github.com/aws/aws-sdk-go/issues/2167 - presignedURLString, err := request.Presign(requestPresignParam) + presignedReq, err := stsClient.PresignGetCallerIdentity(ctx, &sts.GetCallerIdentityInput{}, func(po *sts.PresignOptions) { + po.ClientOptions = append(po.ClientOptions, sts.WithAPIOptions(func(stack *middleware.Stack) error { + return stack.Build.Add(middleware.BuildMiddlewareFunc("AddEKSId", func( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, + ) (middleware.BuildOutput, middleware.Metadata, error) { + switch req := in.Request.(type) { + case *smithyhttp.Request: + query := req.URL.Query() + query.Add(expireHeader, expireValue) + req.URL.RawQuery = query.Encode() + + req.Header.Add(clusterIDHeader, clusterID) + } + return next.HandleBuild(ctx, in) + }), middleware.Before) + })) + }) if err != nil { return "", time.Time{}, trace.Wrap(err) } - // Set token expiration to 1 minute before the presigned URL expires for some cushion + // Set token expiration to 1 minute before the presigned URL expires for some cushion. 
tokenExpiration := clock.Now().Add(presignedURLExpiration - 1*time.Minute) - return v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString)), tokenExpiration, nil + return v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedReq.URL)), tokenExpiration, nil } diff --git a/lib/srv/db/cloud/iam_test.go b/lib/srv/db/cloud/iam_test.go index d13d1fc74b86c..c3b9ecf3dd716 100644 --- a/lib/srv/db/cloud/iam_test.go +++ b/lib/srv/db/cloud/iam_test.go @@ -416,6 +416,7 @@ func (m *mockAccessPoint) GetClusterName(opts ...services.MarshalOption) (types. ClusterID: "cluster-id", }) } + func (m *mockAccessPoint) AcquireSemaphore(ctx context.Context, params types.AcquireSemaphoreRequest) (*types.SemaphoreLease, error) { return &types.SemaphoreLease{ SemaphoreKind: params.SemaphoreKind, @@ -424,6 +425,7 @@ func (m *mockAccessPoint) AcquireSemaphore(ctx context.Context, params types.Acq Expires: params.Expires, }, nil } + func (m *mockAccessPoint) CancelSemaphoreLease(ctx context.Context, lease types.SemaphoreLease) error { return nil } diff --git a/lib/srv/db/common/auth_test.go b/lib/srv/db/common/auth_test.go index ae136b4d53c46..63d79af27e500 100644 --- a/lib/srv/db/common/auth_test.go +++ b/lib/srv/db/common/auth_test.go @@ -957,8 +957,7 @@ func generateAzureVM(t *testing.T, identities []string) armcompute.VirtualMachin } // authClientMock is a mock that implements AuthClient interface. -type authClientMock struct { -} +type authClientMock struct{} // GenerateDatabaseCert generates a cert using fixtures TLS CA. func (m *authClientMock) GenerateDatabaseCert(ctx context.Context, req *proto.DatabaseCertRequest) (*proto.DatabaseCertResponse, error) { @@ -996,8 +995,7 @@ func (m *authClientMock) GenerateDatabaseCert(ctx context.Context, req *proto.Da }, nil } -type accessPointMock struct { -} +type accessPointMock struct{} // GetAuthPreference always returns types.DefaultAuthPreference(). 
func (m accessPointMock) GetAuthPreference(ctx context.Context) (types.AuthPreference, error) { diff --git a/lib/srv/discovery/access_graph.go b/lib/srv/discovery/access_graph.go index 4bc207b21df01..9d6d344ac9fda 100644 --- a/lib/srv/discovery/access_graph.go +++ b/lib/srv/discovery/access_graph.go @@ -502,6 +502,7 @@ func (s *Server) accessGraphFetchersFromMatchers(ctx context.Context, matchers M ctx, aws_sync.Config{ CloudClients: s.CloudClients, + GetEKSClient: s.GetAWSSyncEKSClient, GetEC2Client: s.GetEC2Client, AssumeRole: assumeRole, Regions: awsFetcher.Regions, diff --git a/lib/srv/discovery/common/kubernetes.go b/lib/srv/discovery/common/kubernetes.go index 9c383a6213fda..1bddd210493da 100644 --- a/lib/srv/discovery/common/kubernetes.go +++ b/lib/srv/discovery/common/kubernetes.go @@ -24,7 +24,6 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go/aws" "github.com/gravitational/trace" "github.com/gravitational/teleport/api/types" @@ -40,7 +39,7 @@ func setAWSKubeName(meta types.Metadata, firstNamePart string, extraNameParts .. } // NewKubeClusterFromAWSEKS creates a kube_cluster resource from an EKS cluster. -func NewKubeClusterFromAWSEKS(clusterName, clusterArn string, tags map[string]*string) (types.KubeCluster, error) { +func NewKubeClusterFromAWSEKS(clusterName, clusterArn string, tags map[string]string) (types.KubeCluster, error) { parsedARN, err := arn.Parse(clusterArn) if err != nil { return nil, trace.Wrap(err) @@ -64,7 +63,7 @@ func NewKubeClusterFromAWSEKS(clusterName, clusterArn string, tags map[string]*s } // labelsFromAWSKubeClusterTags creates kube cluster labels. 
-func labelsFromAWSKubeClusterTags(tags map[string]*string, parsedARN arn.ARN) map[string]string { +func labelsFromAWSKubeClusterTags(tags map[string]string, parsedARN arn.ARN) map[string]string { labels := awsEKSTagsToLabels(tags) labels[types.CloudLabel] = types.CloudAWS labels[types.DiscoveryLabelRegion] = parsedARN.Region @@ -74,11 +73,11 @@ func labelsFromAWSKubeClusterTags(tags map[string]*string, parsedARN arn.ARN) ma } // awsEKSTagsToLabels converts AWS tags to a labels map. -func awsEKSTagsToLabels(tags map[string]*string) map[string]string { +func awsEKSTagsToLabels(tags map[string]string) map[string]string { labels := make(map[string]string) for key, val := range tags { if types.IsValidLabelKey(key) { - labels[key] = aws.StringValue(val) + labels[key] = val } else { slog.DebugContext(context.Background(), "Skipping EKS tag that is not a valid label key", "tag", key) } diff --git a/lib/srv/discovery/common/kubernetes_test.go b/lib/srv/discovery/common/kubernetes_test.go index b121c624a1e76..868f9dfac9370 100644 --- a/lib/srv/discovery/common/kubernetes_test.go +++ b/lib/srv/discovery/common/kubernetes_test.go @@ -20,8 +20,8 @@ import ( "testing" "cloud.google.com/go/container/apiv1/containerpb" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -97,16 +97,16 @@ func TestNewKubeClusterFromAWSEKS(t *testing.T) { }) require.NoError(t, err) - cluster := &eks.Cluster{ + cluster := &ekstypes.Cluster{ Name: aws.String("cluster1"), Arn: aws.String("arn:aws:eks:eu-west-1:123456789012:cluster/cluster1"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - overrideLabel: aws.String("override-1"), - "env": aws.String("prod"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + overrideLabel: "override-1", + "env": "prod", }, } - 
actual, err := NewKubeClusterFromAWSEKS(aws.StringValue(cluster.Name), aws.StringValue(cluster.Arn), cluster.Tags) + actual, err := NewKubeClusterFromAWSEKS(aws.ToString(cluster.Name), aws.ToString(cluster.Arn), cluster.Tags) require.NoError(t, err) require.Empty(t, cmp.Diff(expected, actual)) require.NoError(t, err) diff --git a/lib/srv/discovery/common/renaming_test.go b/lib/srv/discovery/common/renaming_test.go index b01825725f672..5be2c13f3b3c4 100644 --- a/lib/srv/discovery/common/renaming_test.go +++ b/lib/srv/discovery/common/renaming_test.go @@ -27,8 +27,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysqlflexibleservers" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redis/armredis/v3" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redisenterprise/armredisenterprise" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" "github.com/aws/aws-sdk-go/service/rds" "github.com/google/uuid" "github.com/stretchr/testify/require" @@ -498,12 +498,12 @@ func labelsToAzureTags(labels map[string]string) map[string]*string { func makeEKSKubeCluster(t *testing.T, name, region, accountID, overrideLabel string) types.KubeCluster { t.Helper() - eksCluster := &eks.Cluster{ + eksCluster := &ekstypes.Cluster{ Name: aws.String(name), Arn: aws.String(fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%s", region, accountID, name)), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - overrideLabel: aws.String(name), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + overrideLabel: name, }, } kubeCluster, err := NewKubeClusterFromAWSEKS(aws.StringValue(eksCluster.Name), aws.StringValue(eksCluster.Arn), eksCluster.Tags) diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index f37ba025d2450..047553edeabde 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -32,8 +32,10 
@@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/aws/aws-sdk-go-v2/service/ssm" ssmtypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/aws/aws-sdk-go/aws/session" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" @@ -115,10 +117,18 @@ type gcpInstaller interface { type Config struct { // CloudClients is an interface for retrieving cloud clients. CloudClients cloud.Clients + + // AWSFetchersClients gets the AWS clients for the given region for the fetchers. + AWSFetchersClients fetchers.AWSClientGetter + + // GetAWSSyncEKSClient gets an AWS EKS client for the given region for fetchers/aws-sync. + GetAWSSyncEKSClient aws_sync.EKSClientGetter + // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. AWSConfigProvider awsconfig.Provider // AWSDatabaseFetcherFactory provides AWS database fetchers AWSDatabaseFetcherFactory *db.AWSFetcherFactory + // GetEC2Client gets an AWS EC2 client for the given region. GetEC2Client server.EC2ClientGetter // GetSSMClient gets an AWS SSM client for the given region. 
@@ -196,6 +206,23 @@ type AccessGraphConfig struct { Insecure bool } +type awsFetchersClientsGetter struct { + awsconfig.Provider +} + +func (f *awsFetchersClientsGetter) GetAWSEKSClient(cfg aws.Config) fetchers.EKSClient { + return eks.NewFromConfig(cfg) +} + +func (f *awsFetchersClientsGetter) GetAWSSTSClient(cfg aws.Config) fetchers.STSClient { + return sts.NewFromConfig(cfg) +} + +func (f *awsFetchersClientsGetter) GetAWSSTSPresignClient(cfg aws.Config) fetchers.STSPresignClient { + stsClient := sts.NewFromConfig(cfg) + return sts.NewPresignClient(stsClient) +} + func (c *Config) CheckAndSetDefaults() error { if c.Matchers.IsEmpty() && c.DiscoveryGroup == "" { return trace.BadParameter("no matchers or discovery group configured for discovery") @@ -253,6 +280,20 @@ kubernetes matchers are present.`) return ec2.NewFromConfig(cfg), nil } } + if c.AWSFetchersClients == nil { + c.AWSFetchersClients = &awsFetchersClientsGetter{ + Provider: awsconfig.ProviderFunc(c.getAWSConfig), + } + } + if c.GetAWSSyncEKSClient == nil { + c.GetAWSSyncEKSClient = func(ctx context.Context, region string, opts ...awsconfig.OptionsFn) (aws_sync.EKSClient, error) { + cfg, err := c.getAWSConfig(ctx, region, opts...) + if err != nil { + return nil, trace.Wrap(err) + } + return eks.NewFromConfig(cfg), nil + } + } if c.GetSSMClient == nil { c.GetSSMClient = func(ctx context.Context, region string, opts ...awsconfig.OptionsFn) (server.SSMClient, error) { cfg, err := c.getAWSConfig(ctx, region, opts...) @@ -561,7 +602,7 @@ func (s *Server) initAWSWatchers(matchers []types.AWSMatcher) error { _, otherMatchers = splitMatchers(otherMatchers, db.IsAWSMatcherType) // Add non-integration kube fetchers. 
- kubeFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.CloudClients, otherMatchers, noDiscoveryConfig) + kubeFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.AWSFetchersClients, otherMatchers, noDiscoveryConfig) if err != nil { return trace.Wrap(err) } @@ -714,12 +755,12 @@ func (s *Server) databaseFetchersFromMatchers(matchers Matchers, discoveryConfig func (s *Server) kubeFetchersFromMatchers(matchers Matchers, discoveryConfigName string) ([]common.Fetcher, error) { var result []common.Fetcher - // AWS + // AWS. awsKubeMatchers, _ := splitMatchers(matchers.AWS, func(matcherType string) bool { return matcherType == types.AWSMatcherEKS }) if len(awsKubeMatchers) > 0 { - eksFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.CloudClients, awsKubeMatchers, discoveryConfigName) + eksFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.AWSFetchersClients, awsKubeMatchers, discoveryConfigName) if err != nil { return nil, trace.Wrap(err) } @@ -1264,7 +1305,6 @@ func (s *Server) filterExistingAzureNodes(instances *server.AzureInstances) erro _, vmOK := labels[types.VMIDLabel] return subscriptionOK && vmOK }) - if err != nil { return trace.Wrap(err) } @@ -1357,7 +1397,6 @@ func (s *Server) filterExistingGCPNodes(instances *server.GCPInstances) error { _, nameOK := labels[types.NameLabelDiscovery] return projectIDOK && zoneOK && nameOK }) - if err != nil { return trace.Wrap(err) } diff --git a/lib/srv/discovery/discovery_test.go b/lib/srv/discovery/discovery_test.go index 865517ba4c33c..3eea560f67174 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -36,17 +36,15 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redis/armredis/v3" - awsv2 "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go-v2/service/redshift" redshifttypes "github.com/aws/aws-sdk-go-v2/service/redshift/types" "github.com/aws/aws-sdk-go-v2/service/ssm" ssmtypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/rds" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -86,6 +84,7 @@ import ( "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/srv/discovery/common" + "github.com/gravitational/teleport/lib/srv/discovery/fetchers" "github.com/gravitational/teleport/lib/srv/discovery/fetchers/db" "github.com/gravitational/teleport/lib/srv/server" usagereporter "github.com/gravitational/teleport/lib/usagereporter/teleport" @@ -175,10 +174,10 @@ func genEC2Instances(n int) []ec2types.Instance { var ec2Instances []ec2types.Instance for _, id := range genEC2InstanceIDs(n) { ec2Instances = append(ec2Instances, ec2types.Instance{ - InstanceId: awsv2.String(id), + InstanceId: aws.String(id), Tags: []ec2types.Tag{{ - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -324,11 +323,12 @@ func TestDiscoveryServer(t *testing.T) { tcs := []struct { name string - // presentInstances is a list of servers already present in teleport + // presentInstances is a list of servers already present in teleport. 
presentInstances []types.Server foundEC2Instances []ec2types.Instance ssm *mockSSMClient emitter *mockEmitter + eksClusters []*ekstypes.Cluster eksEnroller eksClustersEnroller discoveryConfig *discoveryconfig.DiscoveryConfig staticMatchers Matchers @@ -339,14 +339,14 @@ func TestDiscoveryServer(t *testing.T) { ssmRunError error }{ { - name: "no nodes present, 1 found ", + name: "no nodes present, 1 found", presentInstances: []types.Server{}, foundEC2Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("instance-id-1"), + InstanceId: aws.String("instance-id-1"), Tags: []ec2types.Tag{{ - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -356,7 +356,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: &mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -401,10 +401,10 @@ func TestDiscoveryServer(t *testing.T) { }, foundEC2Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("instance-id-1"), + InstanceId: aws.String("instance-id-1"), Tags: []ec2types.Tag{{ - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -414,7 +414,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: &mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -442,10 +442,10 @@ func TestDiscoveryServer(t *testing.T) { }, foundEC2Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("instance-id-1"), + InstanceId: aws.String("instance-id-1"), Tags: []ec2types.Tag{{ - Key: 
awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -455,7 +455,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: &mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -474,7 +474,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: &mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -491,10 +491,10 @@ func TestDiscoveryServer(t *testing.T) { presentInstances: []types.Server{}, foundEC2Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("instance-id-1"), + InstanceId: aws.String("instance-id-1"), Tags: []ec2types.Tag{{ - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -504,7 +504,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: &mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -538,10 +538,10 @@ func TestDiscoveryServer(t *testing.T) { presentInstances: []types.Server{}, foundEC2Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("instance-id-1"), + InstanceId: aws.String("instance-id-1"), Tags: []ec2types.Tag{{ - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -551,7 +551,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: 
&mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -625,10 +625,10 @@ func TestDiscoveryServer(t *testing.T) { presentInstances: []types.Server{}, foundEC2Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("instance-id-1"), + InstanceId: aws.String("instance-id-1"), Tags: []ec2types.Tag{{ - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }}, State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, @@ -638,7 +638,7 @@ func TestDiscoveryServer(t *testing.T) { ssm: &mockSSMClient{ commandOutput: &ssm.SendCommandOutput{ Command: &ssmtypes.Command{ - CommandId: awsv2.String("command-id-1"), + CommandId: aws.String("command-id-1"), }, }, invokeOutput: &ssm.GetCommandInvocationOutput{ @@ -667,7 +667,7 @@ func TestDiscoveryServer(t *testing.T) { staticMatchers: Matchers{}, discoveryConfig: discoveryConfigForUserTaskEC2Test, wantInstalledInstances: []string{}, - userTasksDiscoverCheck: func(tt require.TestingT, i1 interface{}, i2 ...interface{}) { + userTasksDiscoverCheck: func(t require.TestingT, i1 interface{}, i2 ...interface{}) { existingTasks, ok := i1.([]*usertasksv1.UserTask) require.True(t, ok, "failed to get existing tasks: %T", i1) require.Len(t, existingTasks, 1) @@ -693,26 +693,21 @@ func TestDiscoveryServer(t *testing.T) { presentInstances: []types.Server{}, foundEC2Instances: []ec2types.Instance{}, ssm: &mockSSMClient{}, - cloudClients: &cloud.TestCloudClients{ - STS: &mocks.STSClientV1{}, - EKS: &mocks.EKSMock{ - Clusters: []*eks.Cluster{ - { - Name: aws.String("cluster01"), - Arn: aws.String("arn:aws:eks:us-west-2:123456789012:cluster/cluster01"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "RunDiscover": aws.String("Please"), - }, - }, - { - Name: 
aws.String("cluster02"), - Arn: aws.String("arn:aws:eks:us-west-2:123456789012:cluster/cluster02"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "RunDiscover": aws.String("Please"), - }, - }, + eksClusters: []*ekstypes.Cluster{ + { + Name: aws.String("cluster01"), + Arn: aws.String("arn:aws:eks:us-west-2:123456789012:cluster/cluster01"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "RunDiscover": "Please", + }, + }, + { + Name: aws.String("cluster02"), + Arn: aws.String("arn:aws:eks:us-west-2:123456789012:cluster/cluster02"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "RunDiscover": "Please", }, }, }, @@ -737,7 +732,7 @@ func TestDiscoveryServer(t *testing.T) { staticMatchers: Matchers{}, discoveryConfig: discoveryConfigForUserTaskEKSTest, wantInstalledInstances: []string{}, - userTasksDiscoverCheck: func(tt require.TestingT, i1 interface{}, i2 ...interface{}) { + userTasksDiscoverCheck: func(t require.TestingT, i1 interface{}, i2 ...interface{}) { existingTasks, ok := i1.([]*usertasksv1.UserTask) require.True(t, ok, "failed to get existing tasks: %T", i1) require.Len(t, existingTasks, 1) @@ -761,20 +756,21 @@ func TestDiscoveryServer(t *testing.T) { } for _, tc := range tcs { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() + ctx := context.Background() - ec2Client := &mockEC2Client{output: &ec2.DescribeInstancesOutput{ - Reservations: []ec2types.Reservation{ - { - OwnerId: awsv2.String("owner"), - Instances: tc.foundEC2Instances, + ec2Client := &mockEC2Client{ + output: &ec2.DescribeInstancesOutput{ + Reservations: []ec2types.Reservation{ + { + OwnerId: aws.String("owner"), + Instances: tc.foundEC2Instances, + }, }, }, - }} + } - ctx := context.Background() // Create and start test auth server. 
testAuthServer, err := auth.NewTestAuthServer(auth.TestAuthServerConfig{ Dir: t.TempDir(), @@ -782,9 +778,24 @@ func TestDiscoveryServer(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, testAuthServer.Close()) }) + awsOIDCIntegration, err := types.NewIntegrationAWSOIDC(types.Metadata{ + Name: "my-integration", + }, &types.AWSOIDCIntegrationSpecV1{ + RoleARN: "arn:aws:iam::123456789012:role/teleport", + }) + require.NoError(t, err) + testAuthServer.AuthServer.IntegrationsTokenGenerator = &mockIntegrationsTokenGenerator{ + proxies: nil, + integrations: map[string]types.Integration{ + awsOIDCIntegration.GetName(): awsOIDCIntegration, + }, + } + tlsServer, err := testAuthServer.NewTestTLSServer() require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tlsServer.Close()) }) + _, err = tlsServer.Auth().CreateIntegration(ctx, awsOIDCIntegration) + require.NoError(t, err) // Auth client for discovery service. identity := auth.TestServerID(types.RoleDiscovery, "hostID") @@ -816,6 +827,9 @@ func TestDiscoveryServer(t *testing.T) { eksEnroller = tc.eksEnroller } + fakeConfigProvider := mocks.AWSConfigProvider{ + OIDCIntegrationClient: tlsServer.Auth(), + } server, err := New(authz.ContextWithUser(context.Background(), identity.I), &Config{ GetEC2Client: func(ctx context.Context, region string, opts ...awsconfig.OptionsFn) (ec2.DescribeInstancesAPIClient, error) { return ec2Client, nil @@ -823,6 +837,11 @@ func TestDiscoveryServer(t *testing.T) { GetSSMClient: func(ctx context.Context, region string, opts ...awsconfig.OptionsFn) (server.SSMClient, error) { return tc.ssm, nil }, + AWSConfigProvider: &fakeConfigProvider, + AWSFetchersClients: &mockFetchersClients{ + AWSConfigProvider: fakeConfigProvider, + eksClusters: tc.eksClusters, + }, ClusterFeatures: func() proto.Features { return proto.Features{} }, KubernetesClient: fake.NewSimpleClientset(), AccessPoint: getDiscoveryAccessPointWithEKSEnroller(tlsServer.Auth(), authClient, eksEnroller), 
@@ -916,20 +935,20 @@ func TestDiscoveryServerConcurrency(t *testing.T) { output: &ec2.DescribeInstancesOutput{ Reservations: []ec2types.Reservation{ { - OwnerId: awsv2.String("123456789012"), + OwnerId: aws.String("123456789012"), Instances: []ec2types.Instance{ { - InstanceId: awsv2.String("i-123456789012"), + InstanceId: aws.String("i-123456789012"), Tags: []ec2types.Tag{ { - Key: awsv2.String("env"), - Value: awsv2.String("dev"), + Key: aws.String("env"), + Value: aws.String("dev"), }, }, - PrivateIpAddress: awsv2.String("172.0.1.2"), - VpcId: awsv2.String("vpcId"), - SubnetId: awsv2.String("subnetId"), - PrivateDnsName: awsv2.String("privateDnsName"), + PrivateIpAddress: aws.String("172.0.1.2"), + VpcId: aws.String("vpcId"), + SubnetId: aws.String("subnetId"), + PrivateDnsName: aws.String("privateDnsName"), State: &ec2types.InstanceState{ Name: ec2types.InstanceStateNameRunning, }, @@ -1212,11 +1231,12 @@ func TestDiscoveryKubeServices(t *testing.T) { } func TestDiscoveryInCloudKube(t *testing.T) { + t.Parallel() + const ( mainDiscoveryGroup = "main" otherDiscoveryGroup = "other" ) - t.Parallel() tcs := []struct { name string existingKubeClusters []types.KubeCluster @@ -1440,15 +1460,11 @@ func TestDiscoveryInCloudKube(t *testing.T) { } for _, tc := range tcs { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - sts := &mocks.STSClientV1{} testCloudClients := &cloud.TestCloudClients{ - STS: sts, AzureAKSClient: newPopulatedAKSMock(), - EKS: newPopulatedEKSMock(), GCPGKE: newPopulatedGCPMock(), GCPProjects: newPopulatedGCPProjectsMock(), } @@ -1475,7 +1491,7 @@ func TestDiscoveryInCloudKube(t *testing.T) { err := tlsServer.Auth().CreateKubernetesCluster(ctx, kubeCluster) require.NoError(t, err) } - // we analyze the logs emitted by discovery service to detect clusters that were not updated + // We analyze the logs emitted by discovery service to detect clusters that were not updated // because their state didn't change. 
r, w := io.Pipe() t.Cleanup(func() { @@ -1506,15 +1522,26 @@ func TestDiscoveryInCloudKube(t *testing.T) { } } }() + reporter := &mockUsageReporter{} tlsServer.Auth().SetUsageReporter(reporter) + + mockedClients := &mockFetchersClients{ + AWSConfigProvider: mocks.AWSConfigProvider{ + STSClient: &mocks.STSClient{}, + OIDCIntegrationClient: newFakeAccessPoint(), + }, + eksClusters: newPopulatedEKSMock().clusters, + } + discServer, err := New( authz.ContextWithUser(ctx, identity.I), &Config{ - CloudClients: testCloudClients, - ClusterFeatures: func() proto.Features { return proto.Features{} }, - KubernetesClient: fake.NewSimpleClientset(), - AccessPoint: getDiscoveryAccessPoint(tlsServer.Auth(), authClient), + CloudClients: testCloudClients, + AWSFetchersClients: mockedClients, + ClusterFeatures: func() proto.Features { return proto.Features{} }, + KubernetesClient: fake.NewSimpleClientset(), + AccessPoint: getDiscoveryAccessPoint(tlsServer.Auth(), authClient), Matchers: Matchers{ AWS: tc.awsMatchers, Azure: tc.azureMatchers, @@ -1524,12 +1551,9 @@ func TestDiscoveryInCloudKube(t *testing.T) { Log: logger, DiscoveryGroup: mainDiscoveryGroup, }) - require.NoError(t, err) - t.Cleanup(func() { - discServer.Stop() - }) + t.Cleanup(discServer.Stop) go discServer.Start() clustersNotUpdatedMap := sliceToSet(tc.clustersNotUpdated) @@ -1562,8 +1586,8 @@ func TestDiscoveryInCloudKube(t *testing.T) { return len(clustersNotUpdated) == 0 && clustersFoundInAuth }, 5*time.Second, 200*time.Millisecond) - require.ElementsMatch(t, tc.expectedAssumedRoles, sts.GetAssumedRoleARNs(), "roles incorrectly assumed") - require.ElementsMatch(t, tc.expectedExternalIDs, sts.GetAssumedRoleExternalIDs(), "external IDs incorrectly assumed") + require.ElementsMatch(t, tc.expectedAssumedRoles, mockedClients.STSClient.GetAssumedRoleARNs(), "roles incorrectly assumed") + require.ElementsMatch(t, tc.expectedExternalIDs, mockedClients.STSClient.GetAssumedRoleExternalIDs(), "external IDs incorrectly 
assumed") if tc.wantEvents > 0 { require.Eventually(t, func() bool { @@ -1582,14 +1606,15 @@ func TestDiscoveryServer_New(t *testing.T) { t.Parallel() testCases := []struct { desc string - cloudClients cloud.Clients + cloudClients fetchers.AWSClientGetter matchers Matchers errAssertion require.ErrorAssertionFunc discServerAssertion require.ValueAssertionFunc }{ { - desc: "no matchers error", - cloudClients: &cloud.TestCloudClients{STS: &mocks.STSClientV1{}}, + desc: "no matchers error", + + cloudClients: &mockFetchersClients{}, matchers: Matchers{}, errAssertion: func(t require.TestingT, err error, i ...interface{}) { require.ErrorIs(t, err, &trace.BadParameterError{Message: "no matchers or discovery group configured for discovery"}) @@ -1597,8 +1622,10 @@ func TestDiscoveryServer_New(t *testing.T) { discServerAssertion: require.Nil, }, { - desc: "success with EKS matcher", - cloudClients: &cloud.TestCloudClients{STS: &mocks.STSClientV1{}, EKS: &mocks.EKSMock{}}, + desc: "success with EKS matcher", + + cloudClients: &mockFetchersClients{}, + matchers: Matchers{ AWS: []types.AWSMatcher{ { @@ -1621,11 +1648,8 @@ func TestDiscoveryServer_New(t *testing.T) { }, }, { - desc: "EKS fetcher is skipped on initialization error (missing region)", - cloudClients: &cloud.TestCloudClients{ - STS: &mocks.STSClientV1{}, - EKS: &mocks.EKSMock{}, - }, + desc: "EKS fetcher is skipped on initialization error (missing region)", + cloudClients: &mockFetchersClients{}, matchers: Matchers{ AWS: []types.AWSMatcher{ { @@ -1666,12 +1690,12 @@ func TestDiscoveryServer_New(t *testing.T) { discServer, err := New( ctx, &Config{ - CloudClients: tt.cloudClients, - ClusterFeatures: func() proto.Features { return proto.Features{} }, - AccessPoint: newFakeAccessPoint(), - Matchers: tt.matchers, - Emitter: &mockEmitter{}, - protocolChecker: &noopProtocolChecker{}, + AWSFetchersClients: tt.cloudClients, + ClusterFeatures: func() proto.Features { return proto.Features{} }, + AccessPoint: 
newFakeAccessPoint(), + Matchers: tt.matchers, + Emitter: &mockEmitter{}, + protocolChecker: &noopProtocolChecker{}, }) tt.errAssertion(t, err) @@ -1759,28 +1783,33 @@ var aksMockClusters = map[string][]*azure.AKSCluster{ } type mockEKSAPI struct { - eksiface.EKSAPI - clusters []*eks.Cluster + fetchers.EKSClient + clusters []*ekstypes.Cluster } -func (m *mockEKSAPI) ListClustersPagesWithContext(ctx aws.Context, req *eks.ListClustersInput, f func(*eks.ListClustersOutput, bool) bool, _ ...request.Option) error { - var names []*string +func (m *mockEKSAPI) ListClusters(ctx context.Context, req *eks.ListClustersInput, _ ...func(*eks.Options)) (*eks.ListClustersOutput, error) { + var names []string for _, cluster := range m.clusters { - names = append(names, cluster.Name) + names = append(names, aws.ToString(cluster.Name)) } - f(&eks.ListClustersOutput{ - Clusters: names[:len(names)/2], - }, false) - f(&eks.ListClustersOutput{ + // First call, no NextToken. Return first half and a NextToken value. + if req.NextToken == nil { + return &eks.ListClustersOutput{ + Clusters: names[:len(names)/2], + NextToken: aws.String("next"), + }, nil + } + + // Second call, we have a NextToken, return the second half. 
+ return &eks.ListClustersOutput{ Clusters: names[len(names)/2:], - }, true) - return nil + }, nil } -func (m *mockEKSAPI) DescribeClusterWithContext(_ aws.Context, req *eks.DescribeClusterInput, _ ...request.Option) (*eks.DescribeClusterOutput, error) { +func (m *mockEKSAPI) DescribeCluster(_ context.Context, req *eks.DescribeClusterInput, _ ...func(*eks.Options)) (*eks.DescribeClusterOutput, error) { for _, cluster := range m.clusters { - if aws.StringValue(cluster.Name) == aws.StringValue(req.Name) { + if aws.ToString(cluster.Name) == aws.ToString(req.Name) { return &eks.DescribeClusterOutput{ Cluster: cluster, }, nil @@ -1795,48 +1824,70 @@ func newPopulatedEKSMock() *mockEKSAPI { } } -var eksMockClusters = []*eks.Cluster{ +type mockFetchersClients struct { + mocks.AWSConfigProvider + eksClusters []*ekstypes.Cluster +} + +func (m *mockFetchersClients) GetAWSEKSClient(aws.Config) fetchers.EKSClient { + return &mockEKSAPI{ + clusters: m.eksClusters, + } +} + +func (m *mockFetchersClients) GetAWSSTSClient(aws.Config) fetchers.STSClient { + if m.AWSConfigProvider.STSClient != nil { + return m.AWSConfigProvider.STSClient + } + return &mocks.STSClient{} +} + +func (m *mockFetchersClients) GetAWSSTSPresignClient(aws.Config) fetchers.STSPresignClient { + return nil +} + +var eksMockClusters = []*ekstypes.Cluster{ { Name: aws.String("eks-cluster1"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster1"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("prod"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "prod", + "location": "eu-west-1", }, }, { Name: aws.String("eks-cluster2"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster2"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("prod"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: 
map[string]string{ + "env": "prod", + "location": "eu-west-1", }, }, { Name: aws.String("eks-cluster3"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster3"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("stg"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "stg", + "location": "eu-west-1", }, }, { Name: aws.String("eks-cluster4"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster1"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("stg"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "stg", + "location": "eu-west-1", }, }, } -func mustConvertEKSToKubeCluster(t *testing.T, eksCluster *eks.Cluster, discoveryParams rewriteDiscoveryLabelsParams) types.KubeCluster { - cluster, err := common.NewKubeClusterFromAWSEKS(aws.StringValue(eksCluster.Name), aws.StringValue(eksCluster.Arn), eksCluster.Tags) +func mustConvertEKSToKubeCluster(t *testing.T, eksCluster *ekstypes.Cluster, discoveryParams rewriteDiscoveryLabelsParams) types.KubeCluster { + cluster, err := common.NewKubeClusterFromAWSEKS(aws.ToString(eksCluster.Name), aws.ToString(eksCluster.Arn), eksCluster.Tags) require.NoError(t, err) discoveryParams.matcherType = types.AWSMatcherEKS rewriteCloudResource(t, cluster, discoveryParams) @@ -2027,9 +2078,6 @@ func TestDiscoveryDatabase(t *testing.T) { &azure.ARMRedisEnterpriseClusterMock{}, &azure.ARMRedisEnterpriseDatabaseMock{}, ), - EKS: &mocks.EKSMock{ - Clusters: []*eks.Cluster{eksAWSResource}, - }, } tcs := []struct { @@ -2303,7 +2351,6 @@ func TestDiscoveryDatabase(t *testing.T) { } for _, tc := range tcs { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -2370,12 +2417,16 @@ func TestDiscoveryDatabase(t *testing.T) { authz.ContextWithUser(ctx, identity.I), &Config{ 
IntegrationOnlyCredentials: integrationOnlyCredential, - CloudClients: testCloudClients, - AWSDatabaseFetcherFactory: dbFetcherFactory, - AWSConfigProvider: fakeConfigProvider, - ClusterFeatures: func() proto.Features { return proto.Features{} }, - KubernetesClient: fake.NewSimpleClientset(), - AccessPoint: accessPoint, + AWSFetchersClients: &mockFetchersClients{ + AWSConfigProvider: *fakeConfigProvider, + eksClusters: []*ekstypes.Cluster{eksAWSResource}, + }, + CloudClients: testCloudClients, + ClusterFeatures: func() proto.Features { return proto.Features{} }, + KubernetesClient: fake.NewSimpleClientset(), + AccessPoint: getDiscoveryAccessPoint(tlsServer.Auth(), authClient), + AWSDatabaseFetcherFactory: dbFetcherFactory, + AWSConfigProvider: fakeConfigProvider, Matchers: Matchers{ AWS: tc.awsMatchers, Azure: tc.azureMatchers, @@ -2420,7 +2471,7 @@ func TestDiscoveryDatabase(t *testing.T) { cmpopts.IgnoreFields(types.DatabaseStatusV3{}, "CACert"), )) case <-time.After(time.Second): - t.Fatal("Didn't receive reconcile event after 1s") + require.FailNow(t, "Didn't receive reconcile event after 1s") } if tc.wantEvents > 0 { @@ -2601,17 +2652,17 @@ func TestDiscoveryDatabaseRemovingDiscoveryConfigs(t *testing.T) { }) } -func makeEKSCluster(t *testing.T, name, region string, discoveryParams rewriteDiscoveryLabelsParams) (*eks.Cluster, types.KubeCluster) { +func makeEKSCluster(t *testing.T, name, region string, discoveryParams rewriteDiscoveryLabelsParams) (*ekstypes.Cluster, types.KubeCluster) { t.Helper() - eksAWSCluster := &eks.Cluster{ + eksAWSCluster := &ekstypes.Cluster{ Name: aws.String(name), Arn: aws.String(fmt.Sprintf("arn:aws:eks:%s:123456789012:cluster/%s", region, name)), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("prod"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "prod", }, } - actual, err := common.NewKubeClusterFromAWSEKS(aws.StringValue(eksAWSCluster.Name), 
aws.StringValue(eksAWSCluster.Arn), eksAWSCluster.Tags) + actual, err := common.NewKubeClusterFromAWSEKS(aws.ToString(eksAWSCluster.Name), aws.ToString(eksAWSCluster.Arn), eksAWSCluster.Tags) require.NoError(t, err) discoveryParams.matcherType = types.AWSMatcherEKS rewriteCloudResource(t, actual, discoveryParams) @@ -2986,6 +3037,7 @@ func (m *mockGCPClient) getVMSForProject(projectID string) []*gcpimds.Instance { } return vms } + func (m *mockGCPClient) ListInstances(_ context.Context, projectID, _ string) ([]*gcpimds.Instance, error) { return m.getVMSForProject(projectID), nil } @@ -3697,7 +3749,7 @@ func newPopulatedGCPProjectsMock() *mockProjectsAPI { } func newFakeRedshiftClientProvider(c redshift.DescribeClustersAPIClient) db.RedshiftClientProviderFunc { - return func(cfg awsv2.Config, optFns ...func(*redshift.Options)) db.RedshiftClient { + return func(cfg aws.Config, optFns ...func(*redshift.Options)) db.RedshiftClient { return c } } diff --git a/lib/srv/discovery/fetchers/aws-sync/aws-sync.go b/lib/srv/discovery/fetchers/aws-sync/aws-sync.go index 2a7e928370091..adc450ece9fbc 100644 --- a/lib/srv/discovery/fetchers/aws-sync/aws-sync.go +++ b/lib/srv/discovery/fetchers/aws-sync/aws-sync.go @@ -47,6 +47,8 @@ const pageSize int64 = 500 type Config struct { // CloudClients is the cloud clients to use when fetching AWS resources. CloudClients cloud.Clients + // GetEKSClient gets an AWS EKS client for the given region. + GetEKSClient EKSClientGetter // GetEC2Client gets an AWS EC2 client for the given region. GetEC2Client server.EC2ClientGetter // AccountID is the AWS account ID to use when fetching resources. 
diff --git a/lib/srv/discovery/fetchers/aws-sync/eks.go b/lib/srv/discovery/fetchers/aws-sync/eks.go index e4a7cc768ecd2..fc1791b4cb13a 100644 --- a/lib/srv/discovery/fetchers/aws-sync/eks.go +++ b/lib/srv/discovery/fetchers/aws-sync/eks.go @@ -22,16 +22,32 @@ import ( "context" "sync" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/gravitational/trace" "golang.org/x/sync/errgroup" "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha" + "github.com/gravitational/teleport/lib/cloud/awsconfig" ) +// EKSClientGetter returns an EKS client for aws-sync. +type EKSClientGetter func(ctx context.Context, region string, opts ...awsconfig.OptionsFn) (EKSClient, error) + +// EKSClient is the subset of the EKS interface we use in aws-sync. +type EKSClient interface { + eks.ListClustersAPIClient + eks.DescribeClusterAPIClient + + eks.ListAccessEntriesAPIClient + DescribeAccessEntry(ctx context.Context, params *eks.DescribeAccessEntryInput, optFns ...func(*eks.Options)) (*eks.DescribeAccessEntryOutput, error) + + eks.ListAssociatedAccessPoliciesAPIClient +} + // pollAWSEKSClusters is a function that returns a function that fetches // eks clusters and their access scope levels. 
func (a *awsFetcher) pollAWSEKSClusters(ctx context.Context, result *Resources, collectErr func(error)) func() error { @@ -70,7 +86,8 @@ func (a *awsFetcher) fetchAWSSEKSClusters(ctx context.Context) (fetchAWSEKSClust collectClusters := func(cluster *accessgraphv1alpha.AWSEKSClusterV1, clusterAssociatedPolicies []*accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1, clusterAccessEntries []*accessgraphv1alpha.AWSEKSClusterAccessEntryV1, - err error) { + err error, + ) { hostsMu.Lock() defer hostsMu.Unlock() if err != nil { @@ -86,41 +103,34 @@ func (a *awsFetcher) fetchAWSSEKSClusters(ctx context.Context) (fetchAWSEKSClust for _, region := range a.Regions { region := region eG.Go(func() error { - eksClient, err := a.CloudClients.GetAWSEKSClient(ctx, region, a.getAWSOptions()...) + eksClient, err := a.GetEKSClient(ctx, region, a.getAWSV2Options()...) if err != nil { collectClusters(nil, nil, nil, trace.Wrap(err)) return nil } var eksClusterNames []string - // ListClustersPagesWithContext returns a list of EKS cluster names existing in the region. 
- err = eksClient.ListClustersPagesWithContext( - ctx, - &eks.ListClustersInput{}, - func(output *eks.ListClustersOutput, lastPage bool) bool { - for _, cluster := range output.Clusters { - eksClusterNames = append(eksClusterNames, aws.StringValue(cluster)) - } - return !lastPage - - }, - ) - if err != nil { - oldEKSClusters := sliceFilter(existing.EKSClusters, func(cluster *accessgraphv1alpha.AWSEKSClusterV1) bool { - return cluster.Region == region && cluster.AccountId == a.AccountID - }) - oldAccessEntries := sliceFilter(existing.AccessEntries, func(ae *accessgraphv1alpha.AWSEKSClusterAccessEntryV1) bool { - return ae.Cluster.Region == region && ae.AccountId == a.AccountID - }) - oldAssociatedPolicies := sliceFilter(existing.AssociatedAccessPolicies, func(ap *accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1) bool { - return ap.Cluster.Region == region && ap.AccountId == a.AccountID - }) - hostsMu.Lock() - output.clusters = append(output.clusters, oldEKSClusters...) - output.associatedPolicies = append(output.associatedPolicies, oldAssociatedPolicies...) - output.accessEntry = append(output.accessEntry, oldAccessEntries...) - hostsMu.Unlock() + for p := eks.NewListClustersPaginator(eksClient, nil); p.HasMorePages(); { + out, err := p.NextPage(ctx) + if err != nil { + oldEKSClusters := sliceFilter(existing.EKSClusters, func(cluster *accessgraphv1alpha.AWSEKSClusterV1) bool { + return cluster.Region == region && cluster.AccountId == a.AccountID + }) + oldAccessEntries := sliceFilter(existing.AccessEntries, func(ae *accessgraphv1alpha.AWSEKSClusterAccessEntryV1) bool { + return ae.Cluster.Region == region && ae.AccountId == a.AccountID + }) + oldAssociatedPolicies := sliceFilter(existing.AssociatedAccessPolicies, func(ap *accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1) bool { + return ap.Cluster.Region == region && ap.AccountId == a.AccountID + }) + hostsMu.Lock() + output.clusters = append(output.clusters, oldEKSClusters...) 
+ output.associatedPolicies = append(output.associatedPolicies, oldAssociatedPolicies...) + output.accessEntry = append(output.accessEntry, oldAccessEntries...) + hostsMu.Unlock() + break + } + eksClusterNames = append(eksClusterNames, out.Clusters...) } for _, cluster := range eksClusterNames { @@ -134,7 +144,7 @@ func (a *awsFetcher) fetchAWSSEKSClusters(ctx context.Context) (fetchAWSEKSClust return ap.Cluster.Name == cluster && ap.AccountId == a.AccountID && ap.Cluster.Region == region }) // DescribeClusterWithContext retrieves the cluster details. - cluster, err := eksClient.DescribeClusterWithContext(ctx, &eks.DescribeClusterInput{ + cluster, err := eksClient.DescribeCluster(ctx, &eks.DescribeClusterInput{ Name: aws.String(cluster), }, ) @@ -147,7 +157,7 @@ func (a *awsFetcher) fetchAWSSEKSClusters(ctx context.Context) (fetchAWSEKSClust // if eks cluster only allows CONFIGMAP auth, skip polling of access entries and // associated policies. if cluster.Cluster != nil && cluster.Cluster.AccessConfig != nil && - aws.StringValue(cluster.Cluster.AccessConfig.AuthenticationMode) == eks.AuthenticationModeConfigMap { + cluster.Cluster.AccessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap { collectClusters(protoCluster, nil, nil, nil) continue } @@ -181,20 +191,20 @@ func (a *awsFetcher) fetchAWSSEKSClusters(ctx context.Context) (fetchAWSEKSClust // awsEKSClusterToProtoCluster converts an eks.Cluster to accessgraphv1alpha.AWSEKSClusterV1 // representation. 
-func awsEKSClusterToProtoCluster(cluster *eks.Cluster, region, accountID string) *accessgraphv1alpha.AWSEKSClusterV1 { +func awsEKSClusterToProtoCluster(cluster *ekstypes.Cluster, region, accountID string) *accessgraphv1alpha.AWSEKSClusterV1 { var tags []*accessgraphv1alpha.AWSTag for k, v := range cluster.Tags { tags = append(tags, &accessgraphv1alpha.AWSTag{ Key: k, - Value: strPtrToWrapper(v), + Value: wrapperspb.String(v), }) } return &accessgraphv1alpha.AWSEKSClusterV1{ - Name: aws.StringValue(cluster.Name), - Arn: aws.StringValue(cluster.Arn), + Name: aws.ToString(cluster.Name), + Arn: aws.ToString(cluster.Arn), CreatedAt: awsTimeToProtoTime(cluster.CreatedAt), - Status: aws.StringValue(cluster.Status), + Status: string(cluster.Status), Region: region, AccountId: accountID, Tags: tags, @@ -203,33 +213,23 @@ func awsEKSClusterToProtoCluster(cluster *eks.Cluster, region, accountID string) } // fetchAccessEntries fetches the access entries for the given cluster. -func (a *awsFetcher) fetchAccessEntries(ctx context.Context, eksClient eksiface.EKSAPI, cluster *accessgraphv1alpha.AWSEKSClusterV1) ([]*accessgraphv1alpha.AWSEKSClusterAccessEntryV1, error) { +func (a *awsFetcher) fetchAccessEntries(ctx context.Context, eksClient EKSClient, cluster *accessgraphv1alpha.AWSEKSClusterV1) ([]*accessgraphv1alpha.AWSEKSClusterAccessEntryV1, error) { var accessEntries []string - var errs []error - err := eksClient.ListAccessEntriesPagesWithContext( - ctx, - &eks.ListAccessEntriesInput{ - ClusterName: aws.String(cluster.Name), - }, - func(output *eks.ListAccessEntriesOutput, lastPage bool) bool { - for _, accessEntry := range output.AccessEntries { - if aws.StringValue(accessEntry) == "" { - continue - } - accessEntries = append(accessEntries, aws.StringValue(accessEntry)) - } - return !lastPage - }, - ) - if err != nil { - errs = append(errs, trace.Wrap(err)) - return nil, trace.NewAggregate(errs...) 
+ for p := eks.NewListAccessEntriesPaginator(eksClient, + &eks.ListAccessEntriesInput{ClusterName: aws.String(cluster.Name)}, + ); p.HasMorePages(); { + out, err := p.NextPage(ctx) + if err != nil { + return nil, trace.Wrap(err) + } + accessEntries = append(accessEntries, out.AccessEntries...) } + var errs []error var protoAccessEntries []*accessgraphv1alpha.AWSEKSClusterAccessEntryV1 for _, accessEntry := range accessEntries { - rsp, err := eksClient.DescribeAccessEntryWithContext( + rsp, err := eksClient.DescribeAccessEntry( ctx, &eks.DescribeAccessEntryInput{ PrincipalArn: aws.String(accessEntry), @@ -247,84 +247,81 @@ func (a *awsFetcher) fetchAccessEntries(ctx context.Context, eksClient eksiface. ) protoAccessEntries = append(protoAccessEntries, protoAccessEntry) } + return protoAccessEntries, trace.NewAggregate(errs...) } // awsAccessEntryToProtoAccessEntry converts an eks.AccessEntry to accessgraphv1alpha.AWSEKSClusterV1 -func awsAccessEntryToProtoAccessEntry(accessEntry *eks.AccessEntry, cluster *accessgraphv1alpha.AWSEKSClusterV1, accountID string) *accessgraphv1alpha.AWSEKSClusterAccessEntryV1 { - var tags []*accessgraphv1alpha.AWSTag +func awsAccessEntryToProtoAccessEntry(accessEntry *ekstypes.AccessEntry, cluster *accessgraphv1alpha.AWSEKSClusterV1, accountID string) *accessgraphv1alpha.AWSEKSClusterAccessEntryV1 { + tags := make([]*accessgraphv1alpha.AWSTag, 0, len(accessEntry.Tags)) for k, v := range accessEntry.Tags { tags = append(tags, &accessgraphv1alpha.AWSTag{ Key: k, - Value: strPtrToWrapper(v), + Value: wrapperspb.String(v), }) } - out := &accessgraphv1alpha.AWSEKSClusterAccessEntryV1{ + + return &accessgraphv1alpha.AWSEKSClusterAccessEntryV1{ Cluster: cluster, - AccessEntryArn: aws.StringValue(accessEntry.AccessEntryArn), + AccessEntryArn: aws.ToString(accessEntry.AccessEntryArn), CreatedAt: awsTimeToProtoTime(accessEntry.CreatedAt), - KubernetesGroups: aws.StringValueSlice(accessEntry.KubernetesGroups), - Username: 
aws.StringValue(accessEntry.Username), + KubernetesGroups: accessEntry.KubernetesGroups, + Username: aws.ToString(accessEntry.Username), ModifiedAt: awsTimeToProtoTime(accessEntry.ModifiedAt), - PrincipalArn: aws.StringValue(accessEntry.PrincipalArn), - Type: aws.StringValue(accessEntry.Type), + PrincipalArn: aws.ToString(accessEntry.PrincipalArn), + Type: aws.ToString(accessEntry.Type), Tags: tags, AccountId: accountID, LastSyncTime: timestamppb.Now(), } - - return out } // fetchAccessEntries fetches the access entries for the given cluster. -func (a *awsFetcher) fetchAssociatedPolicies(ctx context.Context, eksClient eksiface.EKSAPI, cluster *accessgraphv1alpha.AWSEKSClusterV1, arns []string) ([]*accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1, error) { +func (a *awsFetcher) fetchAssociatedPolicies(ctx context.Context, eksClient EKSClient, cluster *accessgraphv1alpha.AWSEKSClusterV1, arns []string) ([]*accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1, error) { var associatedPolicies []*accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1 var errs []error + for _, arn := range arns { - err := eksClient.ListAssociatedAccessPoliciesPagesWithContext( - ctx, + for p := eks.NewListAssociatedAccessPoliciesPaginator(eksClient, &eks.ListAssociatedAccessPoliciesInput{ ClusterName: aws.String(cluster.Name), PrincipalArn: aws.String(arn), }, - func(output *eks.ListAssociatedAccessPoliciesOutput, lastPage bool) bool { - for _, policy := range output.AssociatedAccessPolicies { - associatedPolicies = append(associatedPolicies, - awsAssociatedAccessPolicy(policy, cluster, arn, a.AccountID), - ) - } - return !lastPage - }, - ) - if err != nil { - errs = append(errs, trace.Wrap(err)) - + ); p.HasMorePages(); { + out, err := p.NextPage(ctx) + if err != nil { + errs = append(errs, err) + break + } + for _, policy := range out.AssociatedAccessPolicies { + associatedPolicies = append(associatedPolicies, + awsAssociatedAccessPolicy(policy, cluster, arn, a.AccountID), + ) + } } - } 
return associatedPolicies, trace.NewAggregate(errs...) } // awsAssociatedAccessPolicy converts an eks.AssociatedAccessPolicy to accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1 -func awsAssociatedAccessPolicy(policy *eks.AssociatedAccessPolicy, cluster *accessgraphv1alpha.AWSEKSClusterV1, principalARN, accountID string) *accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1 { +func awsAssociatedAccessPolicy(policy ekstypes.AssociatedAccessPolicy, cluster *accessgraphv1alpha.AWSEKSClusterV1, principalARN, accountID string) *accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1 { var accessScope *accessgraphv1alpha.AWSEKSAccessScopeV1 if policy.AccessScope != nil { accessScope = &accessgraphv1alpha.AWSEKSAccessScopeV1{ - Namespaces: aws.StringValueSlice(policy.AccessScope.Namespaces), - Type: aws.StringValue(policy.AccessScope.Type), + Namespaces: policy.AccessScope.Namespaces, + Type: string(policy.AccessScope.Type), } } - out := &accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1{ + + return &accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1{ Cluster: cluster, AssociatedAt: awsTimeToProtoTime(policy.AssociatedAt), ModifiedAt: awsTimeToProtoTime(policy.ModifiedAt), PrincipalArn: principalARN, - PolicyArn: aws.StringValue(policy.PolicyArn), + PolicyArn: aws.ToString(policy.PolicyArn), Scope: accessScope, AccountId: accountID, LastSyncTime: timestamppb.Now(), } - - return out } diff --git a/lib/srv/discovery/fetchers/aws-sync/eks_test.go b/lib/srv/discovery/fetchers/aws-sync/eks_test.go index 9c6c395018d95..b38f1ff851a92 100644 --- a/lib/srv/discovery/fetchers/aws-sync/eks_test.go +++ b/lib/srv/discovery/fetchers/aws-sync/eks_test.go @@ -24,8 +24,9 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" 
"google.golang.org/protobuf/testing/protocmp" @@ -33,23 +34,82 @@ import ( "google.golang.org/protobuf/types/known/wrapperspb" accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha" - "github.com/gravitational/teleport/lib/cloud" - "github.com/gravitational/teleport/lib/cloud/mocks" + "github.com/gravitational/teleport/lib/cloud/awsconfig" ) -var ( - date = time.Date(2024, 03, 12, 0, 0, 0, 0, time.UTC) +var date = time.Date(2024, 0o3, 12, 0, 0, 0, 0, time.UTC) + +const ( principalARN = "arn:iam:teleport" accessEntryARN = "arn:iam:access_entry" ) +type mockedEKSClient struct { + clusters []*ekstypes.Cluster + accessEntries []*ekstypes.AccessEntry + associatedAccessPolicies []ekstypes.AssociatedAccessPolicy +} + +func (m *mockedEKSClient) DescribeCluster(ctx context.Context, input *eks.DescribeClusterInput, optFns ...func(*eks.Options)) (*eks.DescribeClusterOutput, error) { + for _, cluster := range m.clusters { + if aws.ToString(cluster.Name) == aws.ToString(input.Name) { + return &eks.DescribeClusterOutput{ + Cluster: cluster, + }, nil + } + } + return nil, nil +} + +func (m *mockedEKSClient) ListClusters(ctx context.Context, input *eks.ListClustersInput, optFns ...func(*eks.Options)) (*eks.ListClustersOutput, error) { + clusterNames := make([]string, 0, len(m.clusters)) + for _, cluster := range m.clusters { + clusterNames = append(clusterNames, aws.ToString(cluster.Name)) + } + return &eks.ListClustersOutput{ + Clusters: clusterNames, + }, nil +} + +func (m *mockedEKSClient) ListAccessEntries(ctx context.Context, input *eks.ListAccessEntriesInput, optFns ...func(*eks.Options)) (*eks.ListAccessEntriesOutput, error) { + accessEntries := make([]string, 0, len(m.accessEntries)) + for _, accessEntry := range m.accessEntries { + accessEntries = append(accessEntries, aws.ToString(accessEntry.AccessEntryArn)) + } + return &eks.ListAccessEntriesOutput{ + AccessEntries: accessEntries, + }, nil +} + +func (m *mockedEKSClient) 
ListAssociatedAccessPolicies(ctx context.Context, input *eks.ListAssociatedAccessPoliciesInput, optFns ...func(*eks.Options)) (*eks.ListAssociatedAccessPoliciesOutput, error) { + return &eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: m.associatedAccessPolicies, + }, nil +} + +func (m *mockedEKSClient) DescribeAccessEntry(ctx context.Context, input *eks.DescribeAccessEntryInput, optFns ...func(*eks.Options)) (*eks.DescribeAccessEntryOutput, error) { + return &eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + AccessEntryArn: aws.String(accessEntryARN), + CreatedAt: aws.Time(date), + ModifiedAt: aws.Time(date), + ClusterName: aws.String("cluster1"), + Tags: map[string]string{ + "t1": "t2", + }, + Type: aws.String(string(ekstypes.AccessScopeTypeCluster)), + Username: aws.String("teleport"), + KubernetesGroups: []string{"teleport"}, + }, + }, nil +} + func TestPollAWSEKSClusters(t *testing.T) { const ( accountID = "12345678" ) - var ( - regions = []string{"eu-west-1"} - ) + regions := []string{"eu-west-1"} cluster := &accessgraphv1alpha.AWSEKSClusterV1{ Name: "cluster1", Arn: "arn:us-west1:eks:cluster1", @@ -58,7 +118,7 @@ func TestPollAWSEKSClusters(t *testing.T) { Tags: []*accessgraphv1alpha.AWSTag{ { Key: "tag1", - Value: nil, + Value: wrapperspb.String(""), }, { Key: "tag2", @@ -102,7 +162,7 @@ func TestPollAWSEKSClusters(t *testing.T) { Cluster: cluster, PrincipalArn: principalARN, Scope: &accessgraphv1alpha.AWSEKSAccessScopeV1{ - Type: eks.AccessScopeTypeCluster, + Type: string(ekstypes.AccessScopeTypeCluster), Namespaces: []string{"ns1"}, }, AssociatedAt: timestamppb.New(date), @@ -116,12 +176,14 @@ func TestPollAWSEKSClusters(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mockedClients := &cloud.TestCloudClients{ - EKS: &mocks.EKSMock{ - Clusters: eksClusters(), - AccessEntries: accessEntries(), - AssociatedPolicies: associatedPolicies(), - }, 
+ t.Parallel() + + getEKSClient := func(_ context.Context, _ string, _ ...awsconfig.OptionsFn) (EKSClient, error) { + return &mockedEKSClient{ + clusters: eksClusters(), + accessEntries: accessEntries(), + associatedAccessPolicies: associatedPolicies(), + }, nil } var ( @@ -137,20 +199,21 @@ func TestPollAWSEKSClusters(t *testing.T) { a := &awsFetcher{ Config: Config{ AccountID: accountID, - CloudClients: mockedClients, Regions: regions, Integration: accountID, + GetEKSClient: getEKSClient, }, lastResult: &Resources{}, } - result := &Resources{} - execFunc := a.pollAWSEKSClusters(context.Background(), result, collectErr) + + var result Resources + execFunc := a.pollAWSEKSClusters(context.Background(), &result, collectErr) require.NoError(t, execFunc()) require.Empty(t, cmp.Diff( tt.want, - result, + &result, protocmp.Transform(), - // tags originate from a map so we must sort them before comparing. + // Tags originate from a map so we must sort them before comparing. protocmp.SortRepeated( func(a, b *accessgraphv1alpha.AWSTag) bool { return a.Key < b.Key @@ -159,52 +222,50 @@ func TestPollAWSEKSClusters(t *testing.T) { protocmp.IgnoreFields(&accessgraphv1alpha.AWSEKSClusterV1{}, "last_sync_time"), protocmp.IgnoreFields(&accessgraphv1alpha.AWSEKSAssociatedAccessPolicyV1{}, "last_sync_time"), protocmp.IgnoreFields(&accessgraphv1alpha.AWSEKSClusterAccessEntryV1{}, "last_sync_time"), - ), - ) - + )) }) } } -func eksClusters() []*eks.Cluster { - return []*eks.Cluster{ +func eksClusters() []*ekstypes.Cluster { + return []*ekstypes.Cluster{ { Name: aws.String("cluster1"), Arn: aws.String("arn:us-west1:eks:cluster1"), CreatedAt: aws.Time(date), - Status: aws.String(eks.AddonStatusActive), - Tags: map[string]*string{ - "tag1": nil, - "tag2": aws.String("val2"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "tag1": "", + "tag2": "val2", }, }, } } -func accessEntries() []*eks.AccessEntry { - return []*eks.AccessEntry{ +func accessEntries() 
[]*ekstypes.AccessEntry { + return []*ekstypes.AccessEntry{ { PrincipalArn: aws.String(principalARN), AccessEntryArn: aws.String(accessEntryARN), CreatedAt: aws.Time(date), ModifiedAt: aws.Time(date), ClusterName: aws.String("cluster1"), - Tags: map[string]*string{ - "t1": aws.String("t2"), + Tags: map[string]string{ + "t1": "t2", }, - Type: aws.String(eks.AccessScopeTypeCluster), + Type: aws.String(string(ekstypes.AccessScopeTypeCluster)), Username: aws.String("teleport"), - KubernetesGroups: []*string{aws.String("teleport")}, + KubernetesGroups: []string{"teleport"}, }, } } -func associatedPolicies() []*eks.AssociatedAccessPolicy { - return []*eks.AssociatedAccessPolicy{ +func associatedPolicies() []ekstypes.AssociatedAccessPolicy { + return []ekstypes.AssociatedAccessPolicy{ { - AccessScope: &eks.AccessScope{ - Namespaces: []*string{aws.String("ns1")}, - Type: aws.String(eks.AccessScopeTypeCluster), + AccessScope: &ekstypes.AccessScope{ + Namespaces: []string{"ns1"}, + Type: ekstypes.AccessScopeTypeCluster, }, ModifiedAt: aws.Time(date), AssociatedAt: aws.Time(date), diff --git a/lib/srv/discovery/fetchers/eks.go b/lib/srv/discovery/fetchers/eks.go index 193244bba75e3..27dcbdd2d83fd 100644 --- a/lib/srv/discovery/fetchers/eks.go +++ b/lib/srv/discovery/fetchers/eks.go @@ -29,13 +29,12 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" 
"golang.org/x/sync/errgroup" @@ -48,8 +47,8 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" awslib "github.com/gravitational/teleport/lib/cloud/aws" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/fixtures" kubeutils "github.com/gravitational/teleport/lib/kube/utils" "github.com/gravitational/teleport/lib/services" @@ -63,24 +62,48 @@ const ( type eksFetcher struct { EKSFetcherConfig - mu sync.Mutex - client eksiface.EKSAPI - stsClient stsiface.STSAPI - callerIdentity string + mu sync.Mutex + client EKSClient + stsPresignClient STSPresignClient + callerIdentity string } -// ClientGetter is an interface for getting an EKS client and an STS client. -type ClientGetter interface { - // GetAWSEKSClient returns AWS EKS client for the specified region. - GetAWSEKSClient(ctx context.Context, region string, opts ...cloud.AWSOptionsFn) (eksiface.EKSAPI, error) - // GetAWSSTSClient returns AWS STS client for the specified region. - GetAWSSTSClient(ctx context.Context, region string, opts ...cloud.AWSOptionsFn) (stsiface.STSAPI, error) +// EKSClient is the subset of the EKS interface we use in fetchers. 
+type EKSClient interface { + eks.DescribeClusterAPIClient + eks.ListClustersAPIClient + + AssociateAccessPolicy(ctx context.Context, params *eks.AssociateAccessPolicyInput, optFns ...func(*eks.Options)) (*eks.AssociateAccessPolicyOutput, error) + CreateAccessEntry(ctx context.Context, params *eks.CreateAccessEntryInput, optFns ...func(*eks.Options)) (*eks.CreateAccessEntryOutput, error) + DeleteAccessEntry(ctx context.Context, params *eks.DeleteAccessEntryInput, optFns ...func(*eks.Options)) (*eks.DeleteAccessEntryOutput, error) + DescribeAccessEntry(ctx context.Context, params *eks.DescribeAccessEntryInput, optFns ...func(*eks.Options)) (*eks.DescribeAccessEntryOutput, error) + UpdateAccessEntry(ctx context.Context, params *eks.UpdateAccessEntryInput, optFns ...func(*eks.Options)) (*eks.UpdateAccessEntryOutput, error) +} + +// STSClient is the subset of the STS interface we use in fetchers. +type STSClient interface { + GetCallerIdentity(ctx context.Context, params *sts.GetCallerIdentityInput, optFns ...func(*sts.Options)) (*sts.GetCallerIdentityOutput, error) + stscreds.AssumeRoleAPIClient +} + +// STSPresignClient is the subset of the STS presign interface we use in fetchers. +type STSPresignClient = kubeutils.STSPresignClient + +// AWSClientGetter is an interface for getting an EKS client and an STS client. +type AWSClientGetter interface { + awsconfig.Provider + // GetAWSEKSClient returns AWS EKS client for the specified config. + GetAWSEKSClient(aws.Config) EKSClient + // GetAWSSTSClient returns AWS STS client for the specified config. + GetAWSSTSClient(aws.Config) STSClient + // GetAWSSTSPresignClient returns AWS STS presign client for the specified config. + GetAWSSTSPresignClient(aws.Config) STSPresignClient } // EKSFetcherConfig configures the EKS fetcher. type EKSFetcherConfig struct { // ClientGetter retrieves an EKS client and an STS client. 
- ClientGetter ClientGetter + ClientGetter AWSClientGetter // AssumeRole provides a role ARN and ExternalID to assume an AWS role // when fetching clusters. AssumeRole types.AssumeRole @@ -133,7 +156,7 @@ func (c *EKSFetcherConfig) CheckAndSetDefaults() error { // MakeEKSFetchersFromAWSMatchers creates fetchers from the provided matchers. Returned fetchers are separated // by their reliance on the integration. -func MakeEKSFetchersFromAWSMatchers(logger *slog.Logger, clients cloud.AWSClients, matchers []types.AWSMatcher, discoveryConfigName string) (kubeFetchers []common.Fetcher, _ error) { +func MakeEKSFetchersFromAWSMatchers(logger *slog.Logger, clients AWSClientGetter, matchers []types.AWSMatcher, discoveryConfigName string) (kubeFetchers []common.Fetcher, _ error) { for _, matcher := range matchers { var matcherAssumeRole types.AssumeRole if matcher.AssumeRole != nil { @@ -162,7 +185,8 @@ func MakeEKSFetchersFromAWSMatchers(logger *slog.Logger, clients cloud.AWSClient "error", err, "region", region, "labels", matcher.Tags, - "assume_role", matcherAssumeRole.RoleARN) + "assume_role", matcherAssumeRole.RoleARN, + ) continue } kubeFetchers = append(kubeFetchers, fetcher) @@ -197,7 +221,7 @@ func NewEKSFetcher(cfg EKSFetcherConfig) (common.Fetcher, error) { return fetcher, nil } -func (a *eksFetcher) getClient(ctx context.Context) (eksiface.EKSAPI, error) { +func (a *eksFetcher) getClient(ctx context.Context) (EKSClient, error) { a.mu.Lock() defer a.mu.Unlock() @@ -205,16 +229,12 @@ func (a *eksFetcher) getClient(ctx context.Context) (eksiface.EKSAPI, error) { return a.client, nil } - client, err := a.ClientGetter.GetAWSEKSClient( - ctx, - a.Region, - a.getAWSOpts()..., - ) + cfg, err := a.ClientGetter.GetConfig(ctx, a.Region, a.getAWSOpts()...) 
if err != nil { return nil, trace.Wrap(err) } - a.client = client + a.client = a.ClientGetter.GetAWSEKSClient(cfg) return a.client, nil } @@ -280,39 +300,38 @@ func (a *eksFetcher) getEKSClusters(ctx context.Context) (types.KubeClusters, er return nil, trace.Wrap(err, "failed getting AWS EKS client") } - err = client.ListClustersPagesWithContext(ctx, - &eks.ListClustersInput{ - Include: nil, // For now we should only list EKS clusters - }, - func(clustersList *eks.ListClustersOutput, _ bool) bool { - for i := 0; i < len(clustersList.Clusters); i++ { - clusterName := aws.StringValue(clustersList.Clusters[i]) - // group.Go will block if the concurrency limit is reached. - // It will resume once any running function finishes. - group.Go(func() error { - cluster, err := a.getMatchingKubeCluster(groupCtx, clusterName) - // trace.CompareFailed is returned if the cluster did not match the matcher filtering labels - // or if the cluster is not yet active. - if trace.IsCompareFailed(err) { - a.Logger.DebugContext(groupCtx, "Cluster did not match the filtering criteria", "error", err, "cluster", clusterName) - // never return an error otherwise we will impact discovery process - return nil - } else if err != nil { - a.Logger.WarnContext(groupCtx, "Failed to discover EKS cluster", "error", err, "cluster", clusterName) - // never return an error otherwise we will impact discovery process - return nil - } - - mu.Lock() - defer mu.Unlock() - clusters = append(clusters, cluster) + // For now we should only list EKS clusters so we use nil (default) input param. + for p := eks.NewListClustersPaginator(client, nil); p.HasMorePages(); { + out, err := p.NextPage(ctx) + if err != nil { + return clusters, trace.Wrap(err) + } + for _, clusterName := range out.Clusters { + // group.Go will block if the concurrency limit is reached. + // It will resume once any running function finishes. 
+ group.Go(func() error { + cluster, err := a.getMatchingKubeCluster(groupCtx, clusterName) + // trace.CompareFailed is returned if the cluster did not match the matcher filtering labels + // or if the cluster is not yet active. + if trace.IsCompareFailed(err) { + a.Logger.DebugContext(groupCtx, "Cluster did not match the filtering criteria", "error", err, "cluster", clusterName) + // never return an error otherwise we will impact discovery process return nil - }) - } - return true - }, - ) - // error can be discarded since we do not return any error from group.Go closure. + } else if err != nil { + a.Logger.WarnContext(groupCtx, "Failed to discover EKS cluster", "error", err, "cluster", clusterName) + // never return an error otherwise we will impact discovery process + return nil + } + + mu.Lock() + defer mu.Unlock() + clusters = append(clusters, cluster) + return nil + }) + } + } + + // The error can be discarded since we do not return any error from group.Go closure. _ = group.Wait() return clusters, trace.Wrap(err) } @@ -352,7 +371,7 @@ func (a *eksFetcher) getMatchingKubeCluster(ctx context.Context, clusterName str return nil, trace.Wrap(err, "failed getting AWS EKS client") } - rsp, err := client.DescribeClusterWithContext( + rsp, err := client.DescribeCluster( ctx, &eks.DescribeClusterInput{ Name: aws.String(clusterName), @@ -362,14 +381,14 @@ func (a *eksFetcher) getMatchingKubeCluster(ctx context.Context, clusterName str return nil, trace.WrapWithMessage(err, "Unable to describe EKS cluster %q", clusterName) } - switch st := aws.StringValue(rsp.Cluster.Status); st { - case eks.ClusterStatusUpdating, eks.ClusterStatusActive: + switch st := rsp.Cluster.Status; st { + case ekstypes.ClusterStatusUpdating, ekstypes.ClusterStatusActive: a.Logger.DebugContext(ctx, "EKS cluster status is valid", "status", st, "cluster", clusterName) default: return nil, trace.CompareFailed("EKS cluster %q not enrolled due to its current status: %s", clusterName, st) } - cluster, 
err := common.NewKubeClusterFromAWSEKS(aws.StringValue(rsp.Cluster.Name), aws.StringValue(rsp.Cluster.Arn), rsp.Cluster.Tags) + cluster, err := common.NewKubeClusterFromAWSEKS(aws.ToString(rsp.Cluster.Name), aws.ToString(rsp.Cluster.Arn), rsp.Cluster.Tags) if err != nil { return nil, trace.WrapWithMessage(err, "Unable to convert eks.Cluster cluster into types.KubernetesClusterV3.") } @@ -388,8 +407,8 @@ func (a *eksFetcher) getMatchingKubeCluster(ctx context.Context, clusterName str // If the fetcher should setup access for the specified ARN, first check if the cluster authentication mode // is set to either [eks.AuthenticationModeApi] or [eks.AuthenticationModeApiAndConfigMap]. // If the authentication mode is set to [eks.AuthenticationModeConfigMap], the fetcher will ignore the cluster. - switch st := aws.StringValue(rsp.Cluster.AccessConfig.AuthenticationMode); st { - case eks.AuthenticationModeApiAndConfigMap, eks.AuthenticationModeApi: + switch st := rsp.Cluster.AccessConfig.AuthenticationMode; st { + case ekstypes.AuthenticationModeApiAndConfigMap, ekstypes.AuthenticationModeApi: if err := a.checkOrSetupAccessForARN(ctx, client, rsp.Cluster); err != nil { return nil, trace.Wrap(err, "unable to setup access for EKS cluster %q", clusterName) } @@ -427,9 +446,9 @@ var eksDiscoveryPermissions = []string{ // The check involves checking if the access entry exists and if the "teleport:kube-agent:eks" is part of the Kubernetes group. // If the access entry doesn't exist or is misconfigured, the fetcher will temporarily gain admin access and create the role and binding. // The fetcher will then upsert the access entry with the correct Kubernetes group. 
-func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksiface.EKSAPI, cluster *eks.Cluster) error { +func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client EKSClient, cluster *ekstypes.Cluster) error { entry, err := convertAWSError( - client.DescribeAccessEntryWithContext(ctx, + client.DescribeAccessEntry(ctx, &eks.DescribeAccessEntryInput{ ClusterName: cluster.Name, PrincipalArn: aws.String(a.SetupAccessForARN), @@ -442,13 +461,13 @@ func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksifa // Access denied means that the principal does not have access to setup access entries for the cluster. a.Logger.WarnContext(ctx, "Access denied to setup access for EKS cluster, ensure the required permissions are set", "error", err, - "cluster", aws.StringValue(cluster.Name), + "cluster", aws.ToString(cluster.Name), "required_permissions", eksDiscoveryPermissions, ) return nil case err == nil: // If the access entry exists and the principal has access to the cluster, check if the teleportKubernetesGroup is part of the Kubernetes group. - if entry.AccessEntry != nil && slices.Contains(aws.StringValueSlice(entry.AccessEntry.KubernetesGroups), teleportKubernetesGroup) { + if entry.AccessEntry != nil && slices.Contains(entry.AccessEntry.KubernetesGroups, teleportKubernetesGroup) { return nil } fallthrough @@ -459,12 +478,12 @@ func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksifa // Access denied means that the principal does not have access to setup access entries for the cluster. 
a.Logger.WarnContext(ctx, "Access denied to setup access for EKS cluster, ensure the required permissions are set", "error", err, - "cluster", aws.StringValue(cluster.Name), + "cluster", aws.ToString(cluster.Name), "required_permissions", eksDiscoveryPermissions, ) return nil } else if err != nil { - return trace.Wrap(err, "unable to setup access for EKS cluster %q", aws.StringValue(cluster.Name)) + return trace.Wrap(err, "unable to setup access for EKS cluster %q", aws.ToString(cluster.Name)) } // upsert the access entry with the correct Kubernetes group for the final @@ -473,29 +492,29 @@ func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksifa // Access denied means that the principal does not have access to setup access entries for the cluster. a.Logger.WarnContext(ctx, "Access denied to setup access for EKS cluster, ensure the required permissions are set", "error", err, - "cluster", aws.StringValue(cluster.Name), + "cluster", aws.ToString(cluster.Name), "required_permissions", eksDiscoveryPermissions, ) return nil } - return trace.Wrap(err, "unable to setup access for EKS cluster %q", aws.StringValue(cluster.Name)) + return trace.Wrap(err, "unable to setup access for EKS cluster %q", aws.ToString(cluster.Name)) default: return trace.Wrap(err) } - } // temporarilyGainAdminAccessAndCreateRole temporarily gains admin access to the EKS cluster by associating the EKS Cluster Admin Policy // to the callerIdentity. The fetcher will then create the role and binding for the teleportKubernetesGroup in the EKS cluster. -func (a *eksFetcher) temporarilyGainAdminAccessAndCreateRole(ctx context.Context, client eksiface.EKSAPI, cluster *eks.Cluster) error { +func (a *eksFetcher) temporarilyGainAdminAccessAndCreateRole(ctx context.Context, client EKSClient, cluster *ekstypes.Cluster) error { const ( // https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html // We use cluster admin policy to create namespace and cluster role. 
eksClusterAdminPolicy = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" ) + // Setup access for the ARN rsp, err := convertAWSError( - client.CreateAccessEntryWithContext(ctx, + client.CreateAccessEntry(ctx, &eks.CreateAccessEntryInput{ ClusterName: cluster.Name, PrincipalArn: aws.String(a.callerIdentity), @@ -510,7 +529,7 @@ func (a *eksFetcher) temporarilyGainAdminAccessAndCreateRole(ctx context.Context if rsp != nil { defer func() { _, err := convertAWSError( - client.DeleteAccessEntryWithContext( + client.DeleteAccessEntry( ctx, &eks.DeleteAccessEntryInput{ ClusterName: cluster.Name, @@ -520,18 +539,17 @@ func (a *eksFetcher) temporarilyGainAdminAccessAndCreateRole(ctx context.Context if err != nil { a.Logger.WarnContext(ctx, "Failed to delete access entry for EKS cluster", "error", err, - "cluster", aws.StringValue(cluster.Name), + "cluster", aws.ToString(cluster.Name), ) } }() - } _, err = convertAWSError( - client.AssociateAccessPolicyWithContext(ctx, &eks.AssociateAccessPolicyInput{ - AccessScope: &eks.AccessScope{ + client.AssociateAccessPolicy(ctx, &eks.AssociateAccessPolicyInput{ + AccessScope: &ekstypes.AccessScope{ Namespaces: nil, - Type: aws.String(eks.AccessScopeTypeCluster), + Type: ekstypes.AccessScopeTypeCluster, }, ClusterName: cluster.Name, PolicyArn: aws.String(eksClusterAdminPolicy), @@ -539,7 +557,7 @@ func (a *eksFetcher) temporarilyGainAdminAccessAndCreateRole(ctx context.Context }), ) if err != nil && !trace.IsAlreadyExists(err) { - return trace.Wrap(err, "unable to associate EKS Access Policy to cluster %q", aws.StringValue(cluster.Name)) + return trace.Wrap(err, "unable to associate EKS Access Policy to cluster %q", aws.ToString(cluster.Name)) } timeout := a.Clock.NewTimer(60 * time.Second) @@ -561,17 +579,19 @@ forLoop: } } - return trace.Wrap(err, "unable to upsert role and binding for cluster %q", aws.StringValue(cluster.Name)) + return trace.Wrap(err, "unable to upsert role and binding for cluster %q", 
aws.ToString(cluster.Name)) } // upsertRoleAndBinding upserts the ClusterRole and ClusterRoleBinding for the teleportKubernetesGroup in the EKS cluster. -func (a *eksFetcher) upsertRoleAndBinding(ctx context.Context, cluster *eks.Cluster) error { - client, err := a.createKubeClient(cluster) +func (a *eksFetcher) upsertRoleAndBinding(ctx context.Context, cluster *ekstypes.Cluster) error { + client, err := a.createKubeClient(ctx, cluster) if err != nil { - return trace.Wrap(err, "unable to create Kubernetes client for cluster %q", aws.StringValue(cluster.Name)) + return trace.Wrap(err, "unable to create Kubernetes client for cluster %q", aws.ToString(cluster.Name)) } + ctx, cancel := context.WithTimeout(ctx, 20*time.Second) defer cancel() + if err := a.upsertClusterRoleWithAdminCredentials(ctx, client); err != nil { return trace.Wrap(err, "unable to upsert ClusterRole for group %q", teleportKubernetesGroup) } @@ -583,23 +603,23 @@ func (a *eksFetcher) upsertRoleAndBinding(ctx context.Context, cluster *eks.Clus return nil } -func (a *eksFetcher) createKubeClient(cluster *eks.Cluster) (*kubernetes.Clientset, error) { - if a.stsClient == nil { - return nil, trace.BadParameter("STS client is not set") +func (a *eksFetcher) createKubeClient(ctx context.Context, cluster *ekstypes.Cluster) (*kubernetes.Clientset, error) { + if a.stsPresignClient == nil { + return nil, trace.BadParameter("STS presign client is not set") } - token, _, err := kubeutils.GenAWSEKSToken(a.stsClient, aws.StringValue(cluster.Name), a.Clock) + token, _, err := kubeutils.GenAWSEKSToken(ctx, a.stsPresignClient, aws.ToString(cluster.Name), a.Clock) if err != nil { - return nil, trace.Wrap(err, "unable to generate EKS token for cluster %q", aws.StringValue(cluster.Name)) + return nil, trace.Wrap(err, "unable to generate EKS token for cluster %q", aws.ToString(cluster.Name)) } - ca, err := base64.StdEncoding.DecodeString(aws.StringValue(cluster.CertificateAuthority.Data)) + ca, err := 
base64.StdEncoding.DecodeString(aws.ToString(cluster.CertificateAuthority.Data)) if err != nil { - return nil, trace.Wrap(err, "unable to decode EKS cluster %q certificate authority", aws.StringValue(cluster.Name)) + return nil, trace.Wrap(err, "unable to decode EKS cluster %q certificate authority", aws.ToString(cluster.Name)) } - apiEndpoint := aws.StringValue(cluster.Endpoint) + apiEndpoint := aws.ToString(cluster.Endpoint) if len(apiEndpoint) == 0 { - return nil, trace.BadParameter("invalid api endpoint for cluster %q", aws.StringValue(cluster.Name)) + return nil, trace.BadParameter("invalid api endpoint for cluster %q", aws.ToString(cluster.Name)) } client, err := kubernetes.NewForConfig( @@ -611,7 +631,7 @@ func (a *eksFetcher) createKubeClient(cluster *eks.Cluster) (*kubernetes.Clients }, }, ) - return client, trace.Wrap(err, "unable to create Kubernetes client for cluster %q", aws.StringValue(cluster.Name)) + return client, trace.Wrap(err, "unable to create Kubernetes client for cluster %q", aws.ToString(cluster.Name)) } // upsertClusterRoleWithAdminCredentials tries to upsert the ClusterRole using admin credentials. @@ -664,13 +684,13 @@ func (a *eksFetcher) upsertClusterRoleBindingWithAdminCredentials(ctx context.Co } // upsertAccessEntry upserts the access entry for the specified ARN with the teleportKubernetesGroup. 
-func (a *eksFetcher) upsertAccessEntry(ctx context.Context, client eksiface.EKSAPI, cluster *eks.Cluster) error { +func (a *eksFetcher) upsertAccessEntry(ctx context.Context, client EKSClient, cluster *ekstypes.Cluster) error { _, err := convertAWSError( - client.CreateAccessEntryWithContext(ctx, + client.CreateAccessEntry(ctx, &eks.CreateAccessEntryInput{ ClusterName: cluster.Name, PrincipalArn: aws.String(a.SetupAccessForARN), - KubernetesGroups: aws.StringSlice([]string{teleportKubernetesGroup}), + KubernetesGroups: []string{teleportKubernetesGroup}, }, )) if err == nil || !trace.IsAlreadyExists(err) { @@ -678,11 +698,11 @@ func (a *eksFetcher) upsertAccessEntry(ctx context.Context, client eksiface.EKSA } _, err = convertAWSError( - client.UpdateAccessEntryWithContext(ctx, + client.UpdateAccessEntry(ctx, &eks.UpdateAccessEntryInput{ ClusterName: cluster.Name, PrincipalArn: aws.String(a.SetupAccessForARN), - KubernetesGroups: aws.StringSlice([]string{teleportKubernetesGroup}), + KubernetesGroups: []string{teleportKubernetesGroup}, }, )) @@ -690,35 +710,35 @@ func (a *eksFetcher) upsertAccessEntry(ctx context.Context, client eksiface.EKSA } func (a *eksFetcher) setCallerIdentity(ctx context.Context) error { - var err error - a.stsClient, err = a.ClientGetter.GetAWSSTSClient( - ctx, + cfg, err := a.ClientGetter.GetConfig(ctx, a.Region, a.getAWSOpts()..., ) if err != nil { return trace.Wrap(err) } - + a.stsPresignClient = a.ClientGetter.GetAWSSTSPresignClient(cfg) if a.AssumeRole.RoleARN != "" { a.callerIdentity = a.AssumeRole.RoleARN return nil } - identity, err := a.stsClient.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{}) + + stsClient := a.ClientGetter.GetAWSSTSClient(cfg) + identity, err := stsClient.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{}) if err != nil { return trace.Wrap(err) } - a.callerIdentity = convertAssumedRoleToIAMRole(aws.StringValue(identity.Arn)) + a.callerIdentity = 
convertAssumedRoleToIAMRole(aws.ToString(identity.Arn)) return nil } -func (a *eksFetcher) getAWSOpts() []cloud.AWSOptionsFn { - return []cloud.AWSOptionsFn{ - cloud.WithAssumeRole( +func (a *eksFetcher) getAWSOpts() []awsconfig.OptionsFn { + return []awsconfig.OptionsFn{ + awsconfig.WithAssumeRole( a.AssumeRole.RoleARN, a.AssumeRole.ExternalID, ), - cloud.WithCredentialsMaybeIntegration(a.Integration), + awsconfig.WithCredentialsMaybeIntegration(a.Integration), } } @@ -734,6 +754,7 @@ func convertAssumedRoleToIAMRole(callerIdentity string) string { const ( assumeRolePrefix = "assumed-role/" roleResource = "role" + serviceName = "iam" ) a, err := arn.Parse(callerIdentity) if err != nil { @@ -742,7 +763,7 @@ func convertAssumedRoleToIAMRole(callerIdentity string) string { if !strings.HasPrefix(a.Resource, assumeRolePrefix) { return callerIdentity } - a.Service = iam.ServiceName + a.Service = serviceName split := strings.Split(a.Resource, "/") if len(split) <= 2 { return callerIdentity diff --git a/lib/srv/discovery/fetchers/eks_test.go b/lib/srv/discovery/fetchers/eks_test.go index d7b9c6b4cac47..ad8c8667d2862 100644 --- a/lib/srv/discovery/fetchers/eks_test.go +++ b/lib/srv/discovery/fetchers/eks_test.go @@ -23,16 +23,16 @@ import ( "errors" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" + "github.com/gravitational/teleport/lib/cloud/mocks" + kubeutils 
"github.com/gravitational/teleport/lib/kube/utils" "github.com/gravitational/teleport/lib/srv/discovery/common" "github.com/gravitational/teleport/lib/utils" ) @@ -43,9 +43,10 @@ func TestEKSFetcher(t *testing.T) { filterLabels types.Labels } tests := []struct { - name string - args args - want types.ResourcesWithLabels + name string + args args + assumeRole types.AssumeRole + want types.ResourcesWithLabels }{ { name: "list everything", @@ -57,6 +58,17 @@ func TestEKSFetcher(t *testing.T) { }, want: eksClustersToResources(t, eksMockClusters...), }, + { + name: "list everything with assumed role", + args: args{ + region: types.Wildcard, + filterLabels: types.Labels{ + types.Wildcard: []string{types.Wildcard}, + }, + }, + assumeRole: types.AssumeRole{RoleARN: "arn:aws:iam::123456789012:role/test-role", ExternalID: "extID123"}, + want: eksClustersToResources(t, eksMockClusters...), + }, { name: "list prod clusters", args: args{ @@ -88,7 +100,6 @@ func TestEKSFetcher(t *testing.T) { }, want: eksClustersToResources(t), }, - { name: "list everything with specified values", args: args{ @@ -102,14 +113,24 @@ func TestEKSFetcher(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + stsClt := &mocks.STSClient{} cfg := EKSFetcherConfig{ - ClientGetter: &mockEKSClientGetter{}, + ClientGetter: &mockEKSClientGetter{ + AWSConfigProvider: mocks.AWSConfigProvider{ + STSClient: stsClt, + }, + }, + AssumeRole: tt.assumeRole, FilterLabels: tt.args.filterLabels, Region: tt.args.region, Logger: utils.NewSlogLoggerForTests(), } fetcher, err := NewEKSFetcher(cfg) require.NoError(t, err) + if tt.assumeRole.RoleARN != "" { + require.Contains(t, stsClt.GetAssumedRoleARNs(), tt.assumeRole.RoleARN) + stsClt.ResetAssumeRoleHistory() + } resources, err := fetcher.Get(context.Background()) require.NoError(t, err) @@ -123,54 +144,68 @@ func TestEKSFetcher(t *testing.T) { } require.Equal(t, tt.want.ToMap(), clusters.ToMap()) + if tt.assumeRole.RoleARN != "" { + 
require.Contains(t, stsClt.GetAssumedRoleARNs(), tt.assumeRole.RoleARN) + } }) } } -type mockEKSClientGetter struct{} +type mockEKSClientGetter struct { + mocks.AWSConfigProvider +} + +func (e *mockEKSClientGetter) GetAWSEKSClient(cfg aws.Config) EKSClient { + return newPopulatedEKSMock() +} -func (e *mockEKSClientGetter) GetAWSEKSClient(ctx context.Context, region string, opts ...cloud.AWSOptionsFn) (eksiface.EKSAPI, error) { - return newPopulatedEKSMock(), nil +func (e *mockEKSClientGetter) GetAWSSTSClient(aws.Config) STSClient { + return &mockSTSAPI{} } -func (e *mockEKSClientGetter) GetAWSSTSClient(ctx context.Context, region string, opts ...cloud.AWSOptionsFn) (stsiface.STSAPI, error) { - return &mockSTSAPI{}, nil +func (e *mockEKSClientGetter) GetAWSSTSPresignClient(aws.Config) kubeutils.STSPresignClient { + return &mockSTSPresignAPI{} +} + +type mockSTSPresignAPI struct{} + +func (a *mockSTSPresignAPI) PresignGetCallerIdentity(ctx context.Context, params *sts.GetCallerIdentityInput, optFns ...func(*sts.PresignOptions)) (*v4.PresignedHTTPRequest, error) { + panic("not implemented") } type mockSTSAPI struct { - stsiface.STSAPI arn string } -func (a *mockSTSAPI) GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) { +func (a *mockSTSAPI) GetCallerIdentity(context.Context, *sts.GetCallerIdentityInput, ...func(*sts.Options)) (*sts.GetCallerIdentityOutput, error) { return &sts.GetCallerIdentityOutput{ Arn: aws.String(a.arn), }, nil } +func (a *mockSTSAPI) AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) { + panic("not implemented") +} + type mockEKSAPI struct { - eksiface.EKSAPI - clusters []*eks.Cluster + EKSClient + + clusters []*ekstypes.Cluster } -func (m *mockEKSAPI) ListClustersPagesWithContext(ctx aws.Context, req *eks.ListClustersInput, f func(*eks.ListClustersOutput, bool) bool, _ ...request.Option) error { - 
var names []*string +func (m *mockEKSAPI) ListClusters(ctx context.Context, req *eks.ListClustersInput, _ ...func(*eks.Options)) (*eks.ListClustersOutput, error) { + var names []string for _, cluster := range m.clusters { - names = append(names, cluster.Name) + names = append(names, aws.ToString(cluster.Name)) } - f(&eks.ListClustersOutput{ - Clusters: names[:len(names)/2], - }, false) - - f(&eks.ListClustersOutput{ - Clusters: names[len(names)/2:], - }, true) - return nil + return &eks.ListClustersOutput{ + Clusters: names, + }, nil } -func (m *mockEKSAPI) DescribeClusterWithContext(_ aws.Context, req *eks.DescribeClusterInput, _ ...request.Option) (*eks.DescribeClusterOutput, error) { +func (m *mockEKSAPI) DescribeCluster(_ context.Context, req *eks.DescribeClusterInput, _ ...func(*eks.Options)) (*eks.DescribeClusterOutput, error) { for _, cluster := range m.clusters { - if aws.StringValue(cluster.Name) == aws.StringValue(req.Name) { + if aws.ToString(cluster.Name) == aws.ToString(req.Name) { return &eks.DescribeClusterOutput{ Cluster: cluster, }, nil @@ -185,51 +220,50 @@ func newPopulatedEKSMock() *mockEKSAPI { } } -var eksMockClusters = []*eks.Cluster{ - +var eksMockClusters = []*ekstypes.Cluster{ { Name: aws.String("cluster1"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster1"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("prod"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "prod", + "location": "eu-west-1", }, }, { Name: aws.String("cluster2"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster2"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("prod"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "prod", + "location": "eu-west-1", }, }, { Name: aws.String("cluster3"), Arn: 
aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster3"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("stg"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "stg", + "location": "eu-west-1", }, }, { Name: aws.String("cluster4"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster1"), - Status: aws.String(eks.ClusterStatusActive), - Tags: map[string]*string{ - "env": aws.String("stg"), - "location": aws.String("eu-west-1"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "env": "stg", + "location": "eu-west-1", }, }, } -func eksClustersToResources(t *testing.T, clusters ...*eks.Cluster) types.ResourcesWithLabels { +func eksClustersToResources(t *testing.T, clusters ...*ekstypes.Cluster) types.ResourcesWithLabels { var kubeClusters types.KubeClusters for _, cluster := range clusters { - kubeCluster, err := common.NewKubeClusterFromAWSEKS(aws.StringValue(cluster.Name), aws.StringValue(cluster.Arn), cluster.Tags) + kubeCluster, err := common.NewKubeClusterFromAWSEKS(aws.ToString(cluster.Name), aws.ToString(cluster.Arn), cluster.Tags) require.NoError(t, err) require.True(t, kubeCluster.IsAWS()) common.ApplyEKSNameSuffix(kubeCluster) diff --git a/lib/srv/discovery/kube_integration_watcher_test.go b/lib/srv/discovery/kube_integration_watcher_test.go index 423339678ae8d..3c7cbd57731fd 100644 --- a/lib/srv/discovery/kube_integration_watcher_test.go +++ b/lib/srv/discovery/kube_integration_watcher_test.go @@ -26,9 +26,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/eks" - eksTypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go-v2/service/sts" - eksV1 "github.com/aws/aws-sdk-go/service/eks" "github.com/google/uuid" "github.com/gravitational/trace" "github.com/stretchr/testify/assert" @@ -45,7 
+44,6 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/authclient" "github.com/gravitational/teleport/lib/authz" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/integrations/awsoidc" "github.com/gravitational/teleport/lib/services" @@ -56,22 +54,24 @@ import ( func TestServer_getKubeFetchers(t *testing.T) { eks1, err := fetchers.NewEKSFetcher(fetchers.EKSFetcherConfig{ - ClientGetter: &cloud.TestCloudClients{STS: &mocks.STSClientV1{}}, + ClientGetter: &mockFetchersClients{}, FilterLabels: types.Labels{"l1": []string{"v1"}}, Region: "region1", }) require.NoError(t, err) eks2, err := fetchers.NewEKSFetcher(fetchers.EKSFetcherConfig{ - ClientGetter: &cloud.TestCloudClients{STS: &mocks.STSClientV1{}}, + ClientGetter: &mockFetchersClients{}, FilterLabels: types.Labels{"l1": []string{"v1"}}, Region: "region1", - Integration: "aws1"}) + Integration: "aws1", + }) require.NoError(t, err) eks3, err := fetchers.NewEKSFetcher(fetchers.EKSFetcherConfig{ - ClientGetter: &cloud.TestCloudClients{STS: &mocks.STSClientV1{}}, + ClientGetter: &mockFetchersClients{}, FilterLabels: types.Labels{"l1": []string{"v1"}}, Region: "region1", - Integration: "aws1"}) + Integration: "aws1", + }) require.NoError(t, err) aks1, err := fetchers.NewAKSFetcher(fetchers.AKSFetcherConfig{ @@ -139,20 +139,51 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { testCAData = "VGVzdENBREFUQQ==" ) - testEKSClusters := []eksTypes.Cluster{ + // Create and start test auth server. 
+ testAuthServer, err := auth.NewTestAuthServer(auth.TestAuthServerConfig{ + Dir: t.TempDir(), + }) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, testAuthServer.Close()) }) + + awsOIDCIntegration, err := types.NewIntegrationAWSOIDC(types.Metadata{ + Name: "integration1", + }, &types.AWSOIDCIntegrationSpecV1{ + RoleARN: roleArn, + }) + require.NoError(t, err) + testAuthServer.AuthServer.IntegrationsTokenGenerator = &mockIntegrationsTokenGenerator{ + proxies: nil, + integrations: map[string]types.Integration{ + awsOIDCIntegration.GetName(): awsOIDCIntegration, + }, + } + + ctx := context.Background() + tlsServer, err := testAuthServer.NewTestTLSServer() + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tlsServer.Close()) }) + _, err = tlsServer.Auth().CreateIntegration(ctx, awsOIDCIntegration) + require.NoError(t, err) + + fakeConfigProvider := mocks.AWSConfigProvider{ + OIDCIntegrationClient: tlsServer.Auth(), + } + + testEKSClusters := []ekstypes.Cluster{ { Name: aws.String("eks-cluster1"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster1"), Tags: map[string]string{"env": "prod", "location": "eu-west-1"}, - CertificateAuthority: &eksTypes.Certificate{Data: aws.String(testCAData)}, - Status: eksTypes.ClusterStatusActive, + CertificateAuthority: &ekstypes.Certificate{Data: aws.String(testCAData)}, + Status: ekstypes.ClusterStatusActive, }, { Name: aws.String("eks-cluster2"), Arn: aws.String("arn:aws:eks:eu-west-1:accountID:cluster/cluster2"), Tags: map[string]string{"env": "prod", "location": "eu-west-1"}, - CertificateAuthority: &eksTypes.Certificate{Data: aws.String(testCAData)}, - Status: eksTypes.ClusterStatusActive, + CertificateAuthority: &ekstypes.Certificate{Data: aws.String(testCAData)}, + Status: ekstypes.ClusterStatusActive, }, } @@ -173,7 +204,7 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { return dc } - clusterFinder := func(clusterName string) *eksTypes.Cluster { + clusterFinder := 
func(clusterName string) *ekstypes.Cluster { for _, c := range testEKSClusters { if aws.ToString(c.Name) == clusterName { return &c @@ -309,17 +340,9 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - testCloudClients := &cloud.TestCloudClients{ - STS: &mocks.STSClientV1{}, - EKS: &mockEKSAPI{ - clusters: eksMockClusters[:2], - }, - } - ctx := context.Background() // Create and start test auth server. testAuthServer, err := auth.NewTestAuthServer(auth.TestAuthServerConfig{ @@ -372,7 +395,10 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { discServer, err := New( authz.ContextWithUser(ctx, identity.I), &Config{ - CloudClients: testCloudClients, + AWSFetchersClients: &mockFetchersClients{ + AWSConfigProvider: fakeConfigProvider, + eksClusters: eksMockClusters[:2], + }, ClusterFeatures: func() proto.Features { return proto.Features{} }, KubernetesClient: fake.NewSimpleClientset(), AccessPoint: tc.accessPoint(t, tlsServer.Auth(), authClient), @@ -391,7 +417,7 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { _, err := tlsServer.Auth().DiscoveryConfigs.CreateDiscoveryConfig(ctx, dc) require.NoError(t, err) - // Wait for the DiscoveryConfig to be added to the dynamic fetchers + // Wait for the DiscoveryConfig to be added to the dynamic fetchers. 
require.Eventually(t, func() bool { discServer.muDynamicKubeFetchers.RLock() defer discServer.muDynamicKubeFetchers.RUnlock() @@ -425,9 +451,9 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { } } -func mustConvertEKSToKubeServerV1(t *testing.T, eksCluster *eksV1.Cluster, resourceID, discoveryGroup string) types.KubeServer { - eksCluster.Tags[types.OriginLabel] = aws.String(types.OriginCloud) - eksCluster.Tags[types.InternalResourceIDLabel] = aws.String(resourceID) +func mustConvertEKSToKubeServerV1(t *testing.T, eksCluster *ekstypes.Cluster, resourceID, _ string) types.KubeServer { + eksCluster.Tags[types.OriginLabel] = types.OriginCloud + eksCluster.Tags[types.InternalResourceIDLabel] = resourceID kubeCluster, err := common.NewKubeClusterFromAWSEKS(aws.ToString(eksCluster.Name), aws.ToString(eksCluster.Arn), eksCluster.Tags) assert.NoError(t, err) @@ -440,13 +466,13 @@ func mustConvertEKSToKubeServerV1(t *testing.T, eksCluster *eksV1.Cluster, resou return kubeServer } -func mustConvertEKSToKubeServerV2(t *testing.T, eksCluster *eksTypes.Cluster, resourceID, discoveryGroup string) types.KubeServer { - eksTags := make(map[string]*string, len(eksCluster.Tags)) +func mustConvertEKSToKubeServerV2(t *testing.T, eksCluster *ekstypes.Cluster, resourceID, _ string) types.KubeServer { + eksTags := make(map[string]string, len(eksCluster.Tags)) for k, v := range eksCluster.Tags { - eksTags[k] = aws.String(v) + eksTags[k] = v } - eksTags[types.OriginLabel] = aws.String(types.OriginCloud) - eksTags[types.InternalResourceIDLabel] = aws.String(resourceID) + eksTags[types.OriginLabel] = types.OriginCloud + eksTags[types.InternalResourceIDLabel] = resourceID kubeCluster, err := common.NewKubeClusterFromAWSEKS(aws.ToString(eksCluster.Name), aws.ToString(eksCluster.Arn), eksTags) assert.NoError(t, err) @@ -476,9 +502,8 @@ func (a *accessPointWrapper) EnrollEKSClusters(ctx context.Context, req *integra } type mockIntegrationsTokenGenerator struct { - proxies []types.Server - 
integrations map[string]types.Integration - tokenCallsCount int + proxies []types.Server + integrations map[string]types.Integration } // GetIntegration returns the specified integration resources. @@ -497,7 +522,6 @@ func (m *mockIntegrationsTokenGenerator) GetProxies() ([]types.Server, error) { // GenerateAWSOIDCToken generates a token to be used to execute an AWS OIDC Integration action. func (m *mockIntegrationsTokenGenerator) GenerateAWSOIDCToken(ctx context.Context, integration string) (string, error) { - m.tokenCallsCount++ return uuid.NewString(), nil } @@ -509,7 +533,7 @@ type mockEnrollEKSClusterClient struct { describeCluster func(context.Context, *eks.DescribeClusterInput, ...func(*eks.Options)) (*eks.DescribeClusterOutput, error) getCallerIdentity func(context.Context, *sts.GetCallerIdentityInput, ...func(*sts.Options)) (*sts.GetCallerIdentityOutput, error) checkAgentAlreadyInstalled func(context.Context, genericclioptions.RESTClientGetter, *slog.Logger) (bool, error) - installKubeAgent func(context.Context, *eksTypes.Cluster, string, string, string, genericclioptions.RESTClientGetter, *slog.Logger, awsoidc.EnrollEKSClustersRequest) error + installKubeAgent func(context.Context, *ekstypes.Cluster, string, string, string, genericclioptions.RESTClientGetter, *slog.Logger, awsoidc.EnrollEKSClustersRequest) error createToken func(context.Context, types.ProvisionToken) error presignGetCallerIdentityURL func(ctx context.Context, clusterName string) (string, error) } @@ -563,7 +587,7 @@ func (m *mockEnrollEKSClusterClient) CheckAgentAlreadyInstalled(ctx context.Cont return false, nil } -func (m *mockEnrollEKSClusterClient) InstallKubeAgent(ctx context.Context, eksCluster *eksTypes.Cluster, proxyAddr, joinToken, resourceId string, kubeconfig genericclioptions.RESTClientGetter, log *slog.Logger, req awsoidc.EnrollEKSClustersRequest) error { +func (m *mockEnrollEKSClusterClient) InstallKubeAgent(ctx context.Context, eksCluster *ekstypes.Cluster, proxyAddr, 
joinToken, resourceId string, kubeconfig genericclioptions.RESTClientGetter, log *slog.Logger, req awsoidc.EnrollEKSClustersRequest) error { if m.installKubeAgent != nil { return m.installKubeAgent(ctx, eksCluster, proxyAddr, joinToken, resourceId, kubeconfig, log, req) } From e10b95659b7aba4485bb71fe6501b59f560ac598 Mon Sep 17 00:00:00 2001 From: Lisa Kim Date: Fri, 10 Jan 2025 11:58:59 -0800 Subject: [PATCH 45/45] WebDiscover: Allow setting labels when enrolling single web application (#50853) * Allow labels for generic add web app flow * Update test --- .../teleport/src/Apps/AddApp/AddApp.story.tsx | 48 +++++++++++-- .../teleport/src/Apps/AddApp/AddApp.tsx | 4 ++ .../src/Apps/AddApp/Automatically.test.tsx | 23 ++++-- .../src/Apps/AddApp/Automatically.tsx | 70 +++++++++---------- .../teleport/src/Apps/AddApp/useAddApp.ts | 22 +++++- .../ResourceLabelTooltip.tsx | 26 ++++++- 6 files changed, 142 insertions(+), 51 deletions(-) diff --git a/web/packages/teleport/src/Apps/AddApp/AddApp.story.tsx b/web/packages/teleport/src/Apps/AddApp/AddApp.story.tsx index db9ba0c4007ba..4ae3007934307 100644 --- a/web/packages/teleport/src/Apps/AddApp/AddApp.story.tsx +++ b/web/packages/teleport/src/Apps/AddApp/AddApp.story.tsx @@ -16,18 +16,50 @@ * along with this program. If not, see . 
*/ +import { useState } from 'react'; + +import { JoinToken } from 'teleport/services/joinToken'; + import { AddApp } from './AddApp'; export default { - title: 'Teleport/Apps/Add', + title: 'Teleport/Discover/Application/Web', }; -export const Created = () => ( - -); +export const CreatedWithoutLabels = () => { + const [token, setToken] = useState(); + + return ( + { + setToken(props.token); + return Promise.resolve(true); + }} + /> + ); +}; + +export const CreatedWithLabels = () => { + const [token, setToken] = useState(); -export const Loaded = () => { - return ; + return ( + { + setToken(props.token); + return Promise.resolve(true); + }} + /> + ); }; export const Processing = () => ( @@ -72,8 +104,10 @@ const props = { createJoinToken: () => Promise.resolve(null), version: '5.0.0-dev', reset: () => null, + labels: [], + setLabels: () => null, attempt: { - status: '', + status: 'success', statusText: '', } as any, token: { diff --git a/web/packages/teleport/src/Apps/AddApp/AddApp.tsx b/web/packages/teleport/src/Apps/AddApp/AddApp.tsx index b40735fbce53d..7a82293d33a7a 100644 --- a/web/packages/teleport/src/Apps/AddApp/AddApp.tsx +++ b/web/packages/teleport/src/Apps/AddApp/AddApp.tsx @@ -44,6 +44,8 @@ export function AddApp({ setAutomatic, isAuthTypeLocal, token, + labels, + setLabels, }: State & Props) { return ( )} {!automatic && ( diff --git a/web/packages/teleport/src/Apps/AddApp/Automatically.test.tsx b/web/packages/teleport/src/Apps/AddApp/Automatically.test.tsx index 5761abdbcb42f..ece5ce843aa57 100644 --- a/web/packages/teleport/src/Apps/AddApp/Automatically.test.tsx +++ b/web/packages/teleport/src/Apps/AddApp/Automatically.test.tsx @@ -16,8 +16,6 @@ * along with this program. If not, see . 
*/ -import { act } from '@testing-library/react'; - import { fireEvent, render, screen } from 'design/utils/testing'; import { Automatically, createAppBashCommand } from './Automatically'; @@ -33,12 +31,14 @@ test('render command only after form submit', async () => { roles: [], content: '', }; - render( + const { rerender } = render( {}} onCreate={() => Promise.resolve(true)} + labels={[]} + setLabels={() => null} + token={null} /> ); @@ -56,8 +56,21 @@ test('render command only after form submit', async () => { target: { value: 'https://gravitational.com' }, }); + rerender( + {}} + onCreate={() => Promise.resolve(true)} + labels={[]} + setLabels={() => null} + token={token} + /> + ); + // click button - act(() => screen.getByRole('button', { name: /Generate Script/i }).click()); + fireEvent.click(screen.getByRole('button', { name: /Generate Script/i })); + + await screen.findByText(/Regenerate Script/i); // after form submission should show the command cmd = createAppBashCommand(token.id, 'app-name', 'https://gravitational.com'); diff --git a/web/packages/teleport/src/Apps/AddApp/Automatically.tsx b/web/packages/teleport/src/Apps/AddApp/Automatically.tsx index de6669284f1ce..6e49916ef1261 100644 --- a/web/packages/teleport/src/Apps/AddApp/Automatically.tsx +++ b/web/packages/teleport/src/Apps/AddApp/Automatically.tsx @@ -20,6 +20,7 @@ import { KeyboardEvent, useEffect, useState } from 'react'; import { Alert, + Box, ButtonPrimary, ButtonSecondary, Flex, @@ -33,24 +34,27 @@ import { Attempt } from 'shared/hooks/useAttemptNext'; import TextSelectCopy from 'teleport/components/TextSelectCopy'; import cfg from 'teleport/config'; +import { LabelsCreater } from 'teleport/Discover/Shared'; +import { ResourceLabelTooltip } from 'teleport/Discover/Shared/ResourceLabelTooltip'; +import { ResourceLabel } from 'teleport/services/agents'; import { State } from './useAddApp'; export function Automatically(props: Props) { - const { onClose, attempt, token } = props; + const { 
onClose, attempt, token, labels, setLabels } = props; const [name, setName] = useState(''); const [uri, setUri] = useState(''); const [cmd, setCmd] = useState(''); useEffect(() => { - if (name && uri) { + if (name && uri && token) { const cmd = createAppBashCommand(token.id, name, uri); setCmd(cmd); } }, [token]); - function handleRegenerate(validator: Validator) { + function onGenerateScript(validator: Validator) { if (!validator.validate()) { return; } @@ -58,25 +62,12 @@ export function Automatically(props: Props) { props.onCreate(name, uri); } - function handleGenerate(validator: Validator) { - if (!validator.validate()) { - return; - } - - const cmd = createAppBashCommand(token.id, name, uri); - setCmd(cmd); - } - function handleEnterPress( e: KeyboardEvent, validator: Validator ) { if (e.key === 'Enter') { - if (cmd) { - handleRegenerate(validator); - } else { - handleGenerate(validator); - } + onGenerateScript(validator); } } @@ -96,6 +87,7 @@ export function Automatically(props: Props) { mr="3" onKeyPress={e => handleEnterPress(e, validator)} onChange={e => setName(e.target.value.toLowerCase())} + disabled={attempt.status === 'processing'} /> handleEnterPress(e, validator)} onChange={e => setUri(e.target.value)} + disabled={attempt.status === 'processing'} /> + + + Add Labels (Optional) + + + + {!cmd && ( Teleport can automatically set up application access. Provide @@ -136,24 +145,13 @@ export function Automatically(props: Props) { )} - {!cmd && ( - handleGenerate(validator)} - > - Generate Script - - )} - {cmd && ( - handleRegenerate(validator)} - > - Regenerate - - )} + onGenerateScript(validator)} + > + {cmd ? 
'Regenerate Script' : 'Generate Script'} + ; token: State['token']; attempt: Attempt; + labels: ResourceLabel[]; + setLabels(r: ResourceLabel[]): void; }; diff --git a/web/packages/teleport/src/Apps/AddApp/useAddApp.ts b/web/packages/teleport/src/Apps/AddApp/useAddApp.ts index be04b6cba17fd..cad6afd65c95c 100644 --- a/web/packages/teleport/src/Apps/AddApp/useAddApp.ts +++ b/web/packages/teleport/src/Apps/AddApp/useAddApp.ts @@ -20,6 +20,7 @@ import { useEffect, useState } from 'react'; import useAttempt from 'shared/hooks/useAttemptNext'; +import { ResourceLabel } from 'teleport/services/agents'; import type { JoinToken } from 'teleport/services/joinToken'; import TeleportContext from 'teleport/teleportContext'; @@ -31,14 +32,27 @@ export default function useAddApp(ctx: TeleportContext) { const isEnterprise = ctx.isEnterprise; const [automatic, setAutomatic] = useState(isEnterprise); const [token, setToken] = useState(); + const [labels, setLabels] = useState([]); useEffect(() => { - createToken(); - }, []); + // We don't want to create token on first render + // which defaults to the automatic tab because + // user may want to add labels. + if (!automatic) { + setLabels([]); + // When switching to manual tab, token can be re-used + // if token was already generated from automatic tab. 
+ if (!token) { + createToken(); + } + } + }, [automatic]); function createToken() { return run(() => - ctx.joinTokenService.fetchJoinToken({ roles: ['App'] }).then(setToken) + ctx.joinTokenService + .fetchJoinToken({ roles: ['App'], suggestedLabels: labels }) + .then(setToken) ); } @@ -52,6 +66,8 @@ export default function useAddApp(ctx: TeleportContext) { isAuthTypeLocal, isEnterprise, token, + labels, + setLabels, }; } diff --git a/web/packages/teleport/src/Discover/Shared/ResourceLabelTooltip/ResourceLabelTooltip.tsx b/web/packages/teleport/src/Discover/Shared/ResourceLabelTooltip/ResourceLabelTooltip.tsx index 4feb605ae4692..f0d5ddc8abf5e 100644 --- a/web/packages/teleport/src/Discover/Shared/ResourceLabelTooltip/ResourceLabelTooltip.tsx +++ b/web/packages/teleport/src/Discover/Shared/ResourceLabelTooltip/ResourceLabelTooltip.tsx @@ -37,12 +37,36 @@ export function ResourceLabelTooltip({ resourceKind, toolTipPosition, }: { - resourceKind: 'server' | 'eks' | 'rds' | 'kube' | 'db'; + resourceKind: 'server' | 'eks' | 'rds' | 'kube' | 'db' | 'app'; toolTipPosition?: Position; }) { let tip; switch (resourceKind) { + case 'app': { + tip = ( + <> + Labels allow you to do the following: +
    +
  • + Filter applications by labels when using tsh, tctl, or the web UI. +
  • +
  • + Restrict access to this application with{' '} + + Teleport RBAC + + . Only roles with app_labels that match + these labels will be allowed to access this application. +
  • +
+ + ); + break; + } case 'server': { tip = ( <>