From de3443027be60c3c63ef1fe0a2cf7178b34e6d82 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:37:14 -0500 Subject: [PATCH] Convert lib/srv/discovery to use slog (#50066) --- lib/service/discovery.go | 1 - lib/srv/db/watcher.go | 4 +- lib/srv/discovery/common/database.go | 5 +- lib/srv/discovery/common/kubernetes.go | 7 +- lib/srv/discovery/common/watcher.go | 18 ++--- lib/srv/discovery/database_watcher.go | 2 +- lib/srv/discovery/discovery.go | 19 ++--- lib/srv/discovery/discovery_test.go | 15 ---- lib/srv/discovery/fetchers/aks.go | 16 ++-- lib/srv/discovery/fetchers/aks_test.go | 4 +- lib/srv/discovery/fetchers/db/aws.go | 19 +---- .../discovery/fetchers/db/aws_elasticache.go | 32 +++++--- lib/srv/discovery/fetchers/db/aws_memorydb.go | 25 +++--- .../discovery/fetchers/db/aws_opensearch.go | 14 +++- lib/srv/discovery/fetchers/db/aws_rds.go | 78 +++++++++++-------- .../discovery/fetchers/db/aws_rds_proxy.go | 39 ++++++---- lib/srv/discovery/fetchers/db/aws_redshift.go | 13 ++-- .../fetchers/db/aws_redshift_serverless.go | 50 ++++++++---- lib/srv/discovery/fetchers/db/azure.go | 30 +++---- .../discovery/fetchers/db/azure_dbserver.go | 26 ++++--- .../fetchers/db/azure_managed_sql.go | 23 +++--- .../fetchers/db/azure_managed_sql_test.go | 8 +- .../discovery/fetchers/db/azure_mysql_flex.go | 29 ++++--- .../fetchers/db/azure_postgres_flex.go | 24 +++--- lib/srv/discovery/fetchers/db/azure_redis.go | 26 ++++--- .../fetchers/db/azure_redis_enterprise.go | 30 +++---- lib/srv/discovery/fetchers/db/azure_sql.go | 11 ++- lib/srv/discovery/fetchers/db/db.go | 8 +- lib/srv/discovery/fetchers/eks.go | 62 +++++++++------ lib/srv/discovery/fetchers/eks_test.go | 4 +- lib/srv/discovery/fetchers/gke.go | 14 ++-- lib/srv/discovery/fetchers/gke_test.go | 4 +- lib/srv/discovery/fetchers/kube_services.go | 14 ++-- .../discovery/fetchers/kube_services_test.go | 2 +- lib/srv/discovery/kube_integration_watcher.go | 2 +- 
.../kube_integration_watcher_test.go | 2 - lib/srv/discovery/kube_services_watcher.go | 2 +- lib/srv/discovery/kube_watcher.go | 6 +- 38 files changed, 379 insertions(+), 309 deletions(-) diff --git a/lib/service/discovery.go b/lib/service/discovery.go index 5ab190b93b764..1cdf5c14a17bf 100644 --- a/lib/service/discovery.go +++ b/lib/service/discovery.go @@ -88,7 +88,6 @@ func (process *TeleportProcess) initDiscoveryService() error { AccessPoint: accessPoint, ServerID: process.Config.HostUUID, Log: process.logger, - LegacyLogger: process.log, ClusterName: conn.ClusterName(), ClusterFeatures: process.GetClusterFeatures, PollInterval: process.Config.Discovery.PollInterval, diff --git a/lib/srv/db/watcher.go b/lib/srv/db/watcher.go index 22bb41c94956e..f4d7fe86d99ec 100644 --- a/lib/srv/db/watcher.go +++ b/lib/srv/db/watcher.go @@ -20,9 +20,9 @@ package db import ( "context" + "log/slog" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -127,7 +127,7 @@ func (s *Server) startCloudWatcher(ctx context.Context) error { watcher, err := discovery.NewWatcher(ctx, discovery.WatcherConfig{ FetchersFn: discovery.StaticFetchers(allFetchers), - Log: logrus.WithField(teleport.ComponentKey, "watcher:cloud"), + Logger: slog.With(teleport.ComponentKey, "watcher:cloud"), Origin: types.OriginCloud, }) if err != nil { diff --git a/lib/srv/discovery/common/database.go b/lib/srv/discovery/common/database.go index 8e51e566cd5fc..237c72f2fc76d 100644 --- a/lib/srv/discovery/common/database.go +++ b/lib/srv/discovery/common/database.go @@ -17,7 +17,9 @@ package common import ( + "context" "fmt" + "log/slog" "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" @@ -36,7 +38,6 @@ import ( "github.com/aws/aws-sdk-go/service/redshift" "github.com/aws/aws-sdk-go/service/redshiftserverless" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" 
"github.com/gravitational/teleport/api/types" apiawsutils "github.com/gravitational/teleport/api/utils/aws" @@ -1403,7 +1404,7 @@ func labelsFromAzureMySQLFlexServer(server *armmysqlflexibleservers.Server) (map labels[types.DiscoveryLabelAzureReplicationRole] = role ssrid, err := arm.ParseResourceID(azure.StringVal(server.Properties.SourceServerResourceID)) if err != nil { - log.WithError(err).Debugf("Skipping malformed %q label for Azure MySQL Flexible server replica.", types.DiscoveryLabelAzureSourceServer) + slog.DebugContext(context.Background(), "Skipping malformed label for Azure MySQL Flexible server replica", "error", err, "label", types.DiscoveryLabelAzureSourceServer) } else { labels[types.DiscoveryLabelAzureSourceServer] = ssrid.Name } diff --git a/lib/srv/discovery/common/kubernetes.go b/lib/srv/discovery/common/kubernetes.go index c4a7de766c038..9c383a6213fda 100644 --- a/lib/srv/discovery/common/kubernetes.go +++ b/lib/srv/discovery/common/kubernetes.go @@ -17,14 +17,15 @@ package common import ( + "context" "fmt" + "log/slog" "maps" "strings" "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go/aws" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -79,7 +80,7 @@ func awsEKSTagsToLabels(tags map[string]*string) map[string]string { if types.IsValidLabelKey(key) { labels[key] = aws.StringValue(val) } else { - logrus.Debugf("Skipping EKS tag %q, not a valid label key.", key) + slog.DebugContext(context.Background(), "Skipping EKS tag that is not a valid label key", "tag", key) } } return labels @@ -97,7 +98,7 @@ func addLabels(labels map[string]string, moreLabels map[string]string) map[strin if types.IsValidLabelKey(key) { labels[key] = value } else { - logrus.Debugf("Skipping %q, not a valid label key.", key) + slog.DebugContext(context.Background(), "Skipping label that is not a valid label key", "label", key) } } return 
labels diff --git a/lib/srv/discovery/common/watcher.go b/lib/srv/discovery/common/watcher.go index a08ff8a4f5b7f..e49c61529608e 100644 --- a/lib/srv/discovery/common/watcher.go +++ b/lib/srv/discovery/common/watcher.go @@ -20,13 +20,13 @@ package common import ( "context" + "log/slog" "maps" "sync" "time" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" "github.com/gravitational/teleport/api/types" @@ -49,8 +49,8 @@ type WatcherConfig struct { Interval time.Duration // TriggerFetchC can be used to force an instant Poll, instead of waiting for the next poll Interval. TriggerFetchC chan struct{} - // Log is the watcher logger. - Log logrus.FieldLogger + // Logger is the watcher logger. + Logger *slog.Logger // Clock is used to control time. Clock clockwork.Clock // DiscoveryGroup is the name of the discovery group that the current @@ -75,8 +75,8 @@ func (c *WatcherConfig) CheckAndSetDefaults() error { if c.TriggerFetchC == nil { c.TriggerFetchC = make(chan struct{}) } - if c.Log == nil { - c.Log = logrus.New() + if c.Logger == nil { + c.Logger = slog.Default() } if c.Clock == nil { c.Clock = clockwork.NewRealClock() @@ -117,7 +117,7 @@ func NewWatcher(ctx context.Context, config WatcherConfig) (*Watcher, error) { func (w *Watcher) Start() { pollTimer := w.cfg.Clock.NewTimer(w.cfg.Interval) defer pollTimer.Stop() - w.cfg.Log.Infof("Starting watcher.") + w.cfg.Logger.InfoContext(w.ctx, "Starting watcher") w.fetchAndSend() for { select { @@ -135,7 +135,7 @@ func (w *Watcher) Start() { pollTimer.Reset(w.cfg.Interval) case <-w.ctx.Done(): - w.cfg.Log.Infof("Watcher done.") + w.cfg.Logger.InfoContext(w.ctx, "Watcher done") return } } @@ -163,9 +163,9 @@ func (w *Watcher) fetchAndSend() { // not others. This is acceptable, so make a debug log instead // of a warning. 
if trace.IsAccessDenied(err) || trace.IsNotFound(err) { - w.cfg.Log.WithError(err).WithField("fetcher", lFetcher).Debugf("Skipped fetcher for %s at %s.", lFetcher.ResourceType(), lFetcher.Cloud()) + w.cfg.Logger.DebugContext(groupCtx, "Skipped fetcher for resources", "error", err, "fetcher", lFetcher, "resource", lFetcher.ResourceType(), "cloud", lFetcher.Cloud()) } else { - w.cfg.Log.WithError(err).WithField("fetcher", lFetcher).Warnf("Unable to fetch resources for %s at %s.", lFetcher.ResourceType(), lFetcher.Cloud()) + w.cfg.Logger.WarnContext(groupCtx, "Unable to fetch resources", "error", err, "fetcher", lFetcher, "resource", lFetcher.ResourceType(), "cloud", lFetcher.Cloud()) } // never return the error otherwise it will impact other watchers. return nil diff --git a/lib/srv/discovery/database_watcher.go b/lib/srv/discovery/database_watcher.go index 4560e6e0ffe58..0ce0c6ef8ef42 100644 --- a/lib/srv/discovery/database_watcher.go +++ b/lib/srv/discovery/database_watcher.go @@ -68,7 +68,7 @@ func (s *Server) startDatabaseWatchers() error { watcher, err := common.NewWatcher(s.ctx, common.WatcherConfig{ FetchersFn: s.getAllDatabaseFetchers, - Log: s.LegacyLogger.WithField("kind", types.KindDatabase), + Logger: s.Log.With("kind", types.KindDatabase), DiscoveryGroup: s.DiscoveryGroup, Interval: s.PollInterval, TriggerFetchC: s.newDiscoveryConfigChangedSub(), diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index 1f8b40af2cdbf..b1fe7c4a1918e 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -37,7 +37,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/timestamppb" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -133,9 +132,6 @@ type Config struct { AccessPoint authclient.DiscoveryAccessPoint // Log is the logger. 
Log *slog.Logger - // LegacyLogger is the old logger - // Deprecated: use Log instead. - LegacyLogger logrus.FieldLogger // ServerID identifies the Teleport instance where this service runs. ServerID string // onDatabaseReconcile is called after each database resource reconciliation. @@ -258,9 +254,7 @@ kubernetes matchers are present.`) if c.Log == nil { c.Log = slog.Default() } - if c.LegacyLogger == nil { - c.LegacyLogger = logrus.New() - } + if c.protocolChecker == nil { c.protocolChecker = fetchers.NewProtoChecker(false) } @@ -281,7 +275,6 @@ kubernetes matchers are present.`) } c.Log = c.Log.With(teleport.ComponentKey, teleport.ComponentDiscovery) - c.LegacyLogger = c.LegacyLogger.WithField(teleport.ComponentKey, teleport.ComponentDiscovery) if c.DiscoveryGroup == "" { const warningMessage = "discovery_service.discovery_group is not set. This field is required for the discovery service to work properly.\n" + @@ -573,7 +566,7 @@ func (s *Server) initAWSWatchers(matchers []types.AWSMatcher) error { _, otherMatchers = splitMatchers(otherMatchers, db.IsAWSMatcherType) // Add non-integration kube fetchers. 
- kubeFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.LegacyLogger, s.CloudClients, otherMatchers, noDiscoveryConfig) + kubeFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.CloudClients, otherMatchers, noDiscoveryConfig) if err != nil { return trace.Wrap(err) } @@ -601,7 +594,7 @@ func (s *Server) initKubeAppWatchers(matchers []types.KubernetesMatcher) error { KubernetesClient: kubeClient, FilterLabels: matcher.Labels, Namespaces: matcher.Namespaces, - Log: s.LegacyLogger, + Logger: s.Log, ClusterName: s.DiscoveryGroup, ProtocolChecker: s.Config.protocolChecker, }) @@ -699,7 +692,7 @@ func (s *Server) kubeFetchersFromMatchers(matchers Matchers, discoveryConfigName return matcherType == types.AWSMatcherEKS }) if len(awsKubeMatchers) > 0 { - eksFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.LegacyLogger, s.CloudClients, awsKubeMatchers, discoveryConfigName) + eksFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.CloudClients, awsKubeMatchers, discoveryConfigName) if err != nil { return nil, trace.Wrap(err) } @@ -766,7 +759,7 @@ func (s *Server) initAzureWatchers(ctx context.Context, matchers []types.AzureMa Regions: matcher.Regions, FilterLabels: matcher.ResourceTags, ResourceGroups: matcher.ResourceGroups, - Log: s.LegacyLogger, + Logger: s.Log, DiscoveryConfigName: discoveryConfigName, }) if err != nil { @@ -857,7 +850,7 @@ func (s *Server) initGCPWatchers(ctx context.Context, matchers []types.GCPMatche Location: location, FilterLabels: matcher.GetLabels(), ProjectID: projectID, - Log: s.LegacyLogger, + Logger: s.Log, }) if err != nil { return trace.Wrap(err) diff --git a/lib/srv/discovery/discovery_test.go b/lib/srv/discovery/discovery_test.go index 7e3e90722d4bb..6b7f3b4b1edc7 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -52,7 +52,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - 
"github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -751,7 +750,6 @@ func TestDiscoveryServer(t *testing.T) { require.NoError(t, err) } - legacyLogger := logrus.New() logger := libutils.NewSlogLoggerForTests() reporter := &mockUsageReporter{} @@ -784,7 +782,6 @@ func TestDiscoveryServer(t *testing.T) { Matchers: tc.staticMatchers, Emitter: tc.emitter, Log: logger, - LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, CloudClients: tc.cloudClients, clock: fakeClock, @@ -845,7 +842,6 @@ func TestDiscoveryServer(t *testing.T) { func TestDiscoveryServerConcurrency(t *testing.T) { t.Parallel() ctx := context.Background() - legacyLogger := logrus.New() logger := libutils.NewSlogLoggerForTests() defaultDiscoveryGroup := "dg01" @@ -942,7 +938,6 @@ func TestDiscoveryServerConcurrency(t *testing.T) { Matchers: staticMatcher, Emitter: emitter, Log: logger, - LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, }) require.NoError(t, err) @@ -1440,12 +1435,8 @@ func TestDiscoveryInCloudKube(t *testing.T) { require.NoError(t, r.Close()) require.NoError(t, w.Close()) }) - - legacyLogger := logrus.New() logger := libutils.NewSlogLoggerForTests() - legacyLogger.SetOutput(w) - legacyLogger.SetLevel(logrus.DebugLevel) clustersNotUpdated := make(chan string, 10) go func() { // reconcileRegexp is the regex extractor of a log message emitted by reconciler when @@ -1484,7 +1475,6 @@ func TestDiscoveryInCloudKube(t *testing.T) { }, Emitter: authClient, Log: logger, - LegacyLogger: legacyLogger, DiscoveryGroup: mainDiscoveryGroup, }) @@ -2852,7 +2842,6 @@ func TestAzureVMDiscovery(t *testing.T) { require.NoError(t, err) } - legacyLogger := logrus.New() logger := libutils.NewSlogLoggerForTests() emitter := &mockEmitter{} @@ -2869,7 +2858,6 @@ func TestAzureVMDiscovery(t *testing.T) { Matchers: tc.staticMatchers, Emitter: emitter, Log: logger, - LegacyLogger: legacyLogger, 
DiscoveryGroup: defaultDiscoveryGroup, }) @@ -3161,7 +3149,6 @@ func TestGCPVMDiscovery(t *testing.T) { require.NoError(t, err) } - legacyLogger := logrus.New() logger := libutils.NewSlogLoggerForTests() emitter := &mockEmitter{} reporter := &mockUsageReporter{} @@ -3177,7 +3164,6 @@ func TestGCPVMDiscovery(t *testing.T) { Matchers: tc.staticMatchers, Emitter: emitter, Log: logger, - LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, }) @@ -3225,7 +3211,6 @@ func TestServer_onCreate(t *testing.T) { DiscoveryGroup: "test-cluster", AccessPoint: accessPoint, Log: libutils.NewSlogLoggerForTests(), - LegacyLogger: logrus.New(), }, } diff --git a/lib/srv/discovery/fetchers/aks.go b/lib/srv/discovery/fetchers/aks.go index fd918e6bb7170..0acdc5ad5e353 100644 --- a/lib/srv/discovery/fetchers/aks.go +++ b/lib/srv/discovery/fetchers/aks.go @@ -21,10 +21,10 @@ package fetchers import ( "context" "fmt" + "log/slog" "slices" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -48,7 +48,7 @@ type AKSFetcherConfig struct { // FilterLabels are the filter criteria. FilterLabels types.Labels // Log is the logger. - Log logrus.FieldLogger + Logger *slog.Logger // DiscoveryConfigName is the name of the DiscoveryConfig that created this Fetcher. 
DiscoveryConfigName string } @@ -66,8 +66,8 @@ func (c *AKSFetcherConfig) CheckAndSetDefaults() error { return trace.BadParameter("missing FilterLabels field") } - if c.Log == nil { - c.Log = logrus.WithField(teleport.ComponentKey, "fetcher:aks") + if c.Logger == nil { + c.Logger = slog.With(teleport.ComponentKey, "fetcher:aks") } return nil } @@ -90,19 +90,19 @@ func (a *aksFetcher) Get(ctx context.Context) (types.ResourcesWithLabels, error) var kubeClusters types.KubeClusters for _, cluster := range clusters { if !a.isRegionSupported(cluster.Location) { - a.Log.Debugf("Cluster region %q does not match with allowed values.", cluster.Location) + a.Logger.DebugContext(ctx, "Cluster region does not match with allowed values", "region", cluster.Location) continue } kubeCluster, err := common.NewKubeClusterFromAzureAKS(cluster) if err != nil { - a.Log.WithError(err).Warn("Unable to create Kubernetes cluster from azure.AKSCluster.") + a.Logger.WarnContext(ctx, "Unable to create Kubernetes cluster from azure.AKSCluster", "error", err) continue } if match, reason, err := services.MatchLabels(a.FilterLabels, kubeCluster.GetAllLabels()); err != nil { - a.Log.WithError(err).Warn("Unable to match AKS cluster labels against match labels.") + a.Logger.WarnContext(ctx, "Unable to match AKS cluster labels against match labels", "error", err) continue } else if !match { - a.Log.Debugf("AKS cluster labels does not match the selector: %s.", reason) + a.Logger.DebugContext(ctx, "AKS cluster labels does not match the selector", "reason", reason) continue } diff --git a/lib/srv/discovery/fetchers/aks_test.go b/lib/srv/discovery/fetchers/aks_test.go index 500ef2ca0e35a..de9bc9fe2351d 100644 --- a/lib/srv/discovery/fetchers/aks_test.go +++ b/lib/srv/discovery/fetchers/aks_test.go @@ -22,12 +22,12 @@ import ( "context" "testing" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" 
"github.com/gravitational/teleport/lib/cloud/azure" "github.com/gravitational/teleport/lib/srv/discovery/common" + "github.com/gravitational/teleport/lib/utils" ) func TestAKSFetcher(t *testing.T) { @@ -117,7 +117,7 @@ func TestAKSFetcher(t *testing.T) { FilterLabels: tt.args.filterLabels, Regions: tt.args.regions, ResourceGroups: tt.args.resourceGroups, - Log: logrus.New(), + Logger: utils.NewSlogLoggerForTests(), } fetcher, err := NewAKSFetcher(cfg) require.NoError(t, err) diff --git a/lib/srv/discovery/fetchers/db/aws.go b/lib/srv/discovery/fetchers/db/aws.go index 789cac7ec4990..9ccf26f82b397 100644 --- a/lib/srv/discovery/fetchers/db/aws.go +++ b/lib/srv/discovery/fetchers/db/aws.go @@ -24,7 +24,6 @@ import ( "log/slog" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -56,9 +55,6 @@ type awsFetcherConfig struct { Labels types.Labels // Region is the AWS region selector to match cloud databases. Region string - // Log is a field logger to provide structured logging for each matcher, - // based on its config settings by default. - Log logrus.FieldLogger // Logger is the slog.Logger Logger *slog.Logger // Integration is the integration name to be used to fetch credentials. 
@@ -84,19 +80,6 @@ func (cfg *awsFetcherConfig) CheckAndSetDefaults(component string) error { if cfg.Region == "" { return trace.BadParameter("missing parameter Region") } - if cfg.Log == nil { - credentialsSource := "environment" - if cfg.Integration != "" { - credentialsSource = fmt.Sprintf("integration:%s", cfg.Integration) - } - cfg.Log = logrus.WithFields(logrus.Fields{ - teleport.ComponentKey: "watch:" + component, - "labels": cfg.Labels, - "region": cfg.Region, - "role": cfg.AssumeRole, - "credentials": credentialsSource, - }) - } if cfg.Logger == nil { credentialsSource := "environment" if cfg.Integration != "" { @@ -148,7 +131,7 @@ func (f *awsFetcher) getDatabases(ctx context.Context) (types.Databases, error) if err != nil { return nil, trace.Wrap(err) } - return filterDatabasesByLabels(databases, f.cfg.Labels, f.cfg.Log), nil + return filterDatabasesByLabels(ctx, databases, f.cfg.Labels, f.cfg.Logger), nil } // rewriteDatabases rewrites the discovered databases. diff --git a/lib/srv/discovery/fetchers/db/aws_elasticache.go b/lib/srv/discovery/fetchers/db/aws_elasticache.go index f27398c0c4c8b..c5c5ebd5fcccf 100644 --- a/lib/srv/discovery/fetchers/db/aws_elasticache.go +++ b/lib/srv/discovery/fetchers/db/aws_elasticache.go @@ -63,14 +63,17 @@ func (f *elastiCachePlugin) GetDatabases(ctx context.Context, cfg *awsFetcherCon var eligibleClusters []*elasticache.ReplicationGroup for _, cluster := range clusters { if !libcloudaws.IsElastiCacheClusterSupported(cluster) { - cfg.Log.Debugf("ElastiCache cluster %q is not supported. Skipping.", aws.StringValue(cluster.ReplicationGroupId)) + cfg.Logger.DebugContext(ctx, "Skipping unsupported ElastiCache cluster", + "cluster", aws.StringValue(cluster.ReplicationGroupId), + ) continue } if !libcloudaws.IsElastiCacheClusterAvailable(cluster) { - cfg.Log.Debugf("The current status of ElastiCache cluster %q is %q. 
Skipping.", - aws.StringValue(cluster.ReplicationGroupId), - aws.StringValue(cluster.Status)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable ElastiCache cluster", + "cluster", aws.StringValue(cluster.ReplicationGroupId), + "status", aws.StringValue(cluster.Status), + ) continue } @@ -86,17 +89,17 @@ func (f *elastiCachePlugin) GetDatabases(ctx context.Context, cfg *awsFetcherCon allNodes, err := getElastiCacheNodes(ctx, ecClient) if err != nil { if trace.IsAccessDenied(err) { - cfg.Log.WithError(err).Debug("No permissions to describe nodes") + cfg.Logger.DebugContext(ctx, "No permissions to describe nodes", "error", err) } else { - cfg.Log.WithError(err).Info("Failed to describe nodes.") + cfg.Logger.InfoContext(ctx, "Failed to describe nodes", "error", err) } } allSubnetGroups, err := getElastiCacheSubnetGroups(ctx, ecClient) if err != nil { if trace.IsAccessDenied(err) { - cfg.Log.WithError(err).Debug("No permissions to describe subnet groups") + cfg.Logger.DebugContext(ctx, "No permissions to describe subnet groups", "error", err) } else { - cfg.Log.WithError(err).Info("Failed to describe subnet groups.") + cfg.Logger.InfoContext(ctx, "Failed to describe subnet groups", "error", err) } } @@ -108,17 +111,22 @@ func (f *elastiCachePlugin) GetDatabases(ctx context.Context, cfg *awsFetcherCon tags, err := getElastiCacheResourceTags(ctx, ecClient, cluster.ARN) if err != nil { if trace.IsAccessDenied(err) { - cfg.Log.WithError(err).Debug("No permissions to list resource tags") + cfg.Logger.DebugContext(ctx, "No permissions to list resource tags", "error", err) } else { - cfg.Log.WithError(err).Infof("Failed to list resource tags for ElastiCache cluster %q.", aws.StringValue(cluster.ReplicationGroupId)) + cfg.Logger.InfoContext(ctx, "Failed to list resource tags for ElastiCache cluster", + "cluster", aws.StringValue(cluster.ReplicationGroupId), + "error", err, + ) } } extraLabels := common.ExtraElastiCacheLabels(cluster, tags, allNodes, allSubnetGroups) if dbs, 
err := common.NewDatabasesFromElastiCacheReplicationGroup(cluster, extraLabels); err != nil { - cfg.Log.Infof("Could not convert ElastiCache cluster %q to database resources: %v.", - aws.StringValue(cluster.ReplicationGroupId), err) + cfg.Logger.InfoContext(ctx, "Could not convert ElastiCache cluster to database resources", + "cluster", aws.StringValue(cluster.ReplicationGroupId), + "error", err, + ) } else { databases = append(databases, dbs...) } diff --git a/lib/srv/discovery/fetchers/db/aws_memorydb.go b/lib/srv/discovery/fetchers/db/aws_memorydb.go index 88a8a16f908eb..a559b50125557 100644 --- a/lib/srv/discovery/fetchers/db/aws_memorydb.go +++ b/lib/srv/discovery/fetchers/db/aws_memorydb.go @@ -61,14 +61,15 @@ func (f *memoryDBPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConfig var eligibleClusters []*memorydb.Cluster for _, cluster := range clusters { if !libcloudaws.IsMemoryDBClusterSupported(cluster) { - cfg.Log.Debugf("MemoryDB cluster %q is not supported. Skipping.", aws.StringValue(cluster.Name)) + cfg.Logger.DebugContext(ctx, "Skipping unsupported MemoryDB cluster", "cluster", aws.StringValue(cluster.Name)) continue } if !libcloudaws.IsMemoryDBClusterAvailable(cluster) { - cfg.Log.Debugf("The current status of MemoryDB cluster %q is %q. 
Skipping.", - aws.StringValue(cluster.Name), - aws.StringValue(cluster.Status)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable MemoryDB cluster", + "cluster", aws.StringValue(cluster.Name), + "status", aws.StringValue(cluster.Status), + ) continue } @@ -84,9 +85,9 @@ func (f *memoryDBPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConfig allSubnetGroups, err := getMemoryDBSubnetGroups(ctx, memDBClient) if err != nil { if trace.IsAccessDenied(err) { - cfg.Log.WithError(err).Debug("No permissions to describe subnet groups") + cfg.Logger.DebugContext(ctx, "No permissions to describe subnet groups", "error", err) } else { - cfg.Log.WithError(err).Info("Failed to describe subnet groups.") + cfg.Logger.InfoContext(ctx, "Failed to describe subnet groups", "error", err) } } @@ -95,16 +96,22 @@ func (f *memoryDBPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConfig tags, err := getMemoryDBResourceTags(ctx, memDBClient, cluster.ARN) if err != nil { if trace.IsAccessDenied(err) { - cfg.Log.WithError(err).Debug("No permissions to list resource tags") + cfg.Logger.DebugContext(ctx, "No permissions to list resource tags", "error", err) } else { - cfg.Log.WithError(err).Infof("Failed to list resource tags for MemoryDB cluster %q.", aws.StringValue(cluster.Name)) + cfg.Logger.InfoContext(ctx, "Failed to list resource tags for MemoryDB cluster ", + "error", err, + "cluster", aws.StringValue(cluster.Name), + ) } } extraLabels := common.ExtraMemoryDBLabels(cluster, tags, allSubnetGroups) database, err := common.NewDatabaseFromMemoryDBCluster(cluster, extraLabels) if err != nil { - cfg.Log.WithError(err).Infof("Could not convert memorydb cluster %q configuration endpoint to database resource.", aws.StringValue(cluster.Name)) + cfg.Logger.InfoContext(ctx, "Could not convert memorydb cluster configuration endpoint to database resource", + "error", err, + "cluster", aws.StringValue(cluster.Name), + ) } else { databases = append(databases, database) } diff --git 
a/lib/srv/discovery/fetchers/db/aws_opensearch.go b/lib/srv/discovery/fetchers/db/aws_opensearch.go index 381d72c8a267f..9d17500d48800 100644 --- a/lib/srv/discovery/fetchers/db/aws_opensearch.go +++ b/lib/srv/discovery/fetchers/db/aws_opensearch.go @@ -60,7 +60,7 @@ func (f *openSearchPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConf var eligibleDomains []*opensearchservice.DomainStatus for _, domain := range domains { if !libcloudaws.IsOpenSearchDomainAvailable(domain) { - cfg.Log.Debugf("OpenSearch domain %q is unavailable. Skipping.", aws.StringValue(domain.DomainName)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable OpenSearch domain", "domain", aws.StringValue(domain.DomainName)) continue } @@ -77,15 +77,21 @@ func (f *openSearchPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConf if err != nil { if trace.IsAccessDenied(err) { - cfg.Log.WithError(err).Debug("No permissions to list resource tags") + cfg.Logger.DebugContext(ctx, "No permissions to list resource tags", "error", err) } else { - cfg.Log.WithError(err).Infof("Failed to list resource tags for OpenSearch domain %q.", aws.StringValue(domain.DomainName)) + cfg.Logger.InfoContext(ctx, "Failed to list resource tags for OpenSearch domain", + "error", err, + "domain", aws.StringValue(domain.DomainName), + ) } } dbs, err := common.NewDatabasesFromOpenSearchDomain(domain, tags) if err != nil { - cfg.Log.WithError(err).Infof("Could not convert OpenSearch domain %q configuration to database resource.", aws.StringValue(domain.DomainName)) + cfg.Logger.InfoContext(ctx, "Could not convert OpenSearch domain configuration to database resource", + "error", err, + "domain", aws.StringValue(domain.DomainName), + ) } else { databases = append(databases, dbs...) 
} diff --git a/lib/srv/discovery/fetchers/db/aws_rds.go b/lib/srv/discovery/fetchers/db/aws_rds.go index 36c94dddecd20..639835f2b75a2 100644 --- a/lib/srv/discovery/fetchers/db/aws_rds.go +++ b/lib/srv/discovery/fetchers/db/aws_rds.go @@ -20,13 +20,13 @@ package db import ( "context" + "log/slog" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/rds/rdsiface" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud" @@ -56,31 +56,35 @@ func (f *rdsDBInstancesPlugin) GetDatabases(ctx context.Context, cfg *awsFetcher if err != nil { return nil, trace.Wrap(err) } - instances, err := getAllDBInstances(ctx, rdsClient, maxAWSPages, cfg.Log) + instances, err := getAllDBInstances(ctx, rdsClient, maxAWSPages, cfg.Logger) if err != nil { return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } databases := make(types.Databases, 0, len(instances)) for _, instance := range instances { if !libcloudaws.IsRDSInstanceSupported(instance) { - cfg.Log.Debugf("RDS instance %q (engine mode %v, engine version %v) doesn't support IAM authentication. Skipping.", - aws.StringValue(instance.DBInstanceIdentifier), - aws.StringValue(instance.Engine), - aws.StringValue(instance.EngineVersion)) + cfg.Logger.DebugContext(ctx, "Skipping RDS instance that does not support IAM authentication", + "instance", aws.StringValue(instance.DBInstanceIdentifier), + "engine_mode", aws.StringValue(instance.Engine), + "engine_version", aws.StringValue(instance.EngineVersion), + ) continue } if !libcloudaws.IsRDSInstanceAvailable(instance.DBInstanceStatus, instance.DBInstanceIdentifier) { - cfg.Log.Debugf("The current status of RDS instance %q is %q. 
Skipping.", - aws.StringValue(instance.DBInstanceIdentifier), - aws.StringValue(instance.DBInstanceStatus)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable RDS instance", + "instance", aws.StringValue(instance.DBInstanceIdentifier), + "status", aws.StringValue(instance.DBInstanceStatus), + ) continue } database, err := common.NewDatabaseFromRDSInstance(instance) if err != nil { - cfg.Log.Warnf("Could not convert RDS instance %q to database resource: %v.", - aws.StringValue(instance.DBInstanceIdentifier), err) + cfg.Logger.WarnContext(ctx, "Could not convert RDS instance to database resource", + "instance", aws.StringValue(instance.DBInstanceIdentifier), + "error", err, + ) } else { databases = append(databases, database) } @@ -90,20 +94,20 @@ func (f *rdsDBInstancesPlugin) GetDatabases(ctx context.Context, cfg *awsFetcher // getAllDBInstances fetches all RDS instances using the provided client, up // to the specified max number of pages. -func getAllDBInstances(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, log logrus.FieldLogger) ([]*rds.DBInstance, error) { - return getAllDBInstancesWithFilters(ctx, rdsClient, maxPages, rdsInstanceEngines(), rdsEmptyFilter(), log) +func getAllDBInstances(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, logger *slog.Logger) ([]*rds.DBInstance, error) { + return getAllDBInstancesWithFilters(ctx, rdsClient, maxPages, rdsInstanceEngines(), rdsEmptyFilter(), logger) } // findDBInstancesForDBCluster returns the DBInstances associated with a given DB Cluster Identifier -func findDBInstancesForDBCluster(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, dbClusterIdentifier string, log logrus.FieldLogger) ([]*rds.DBInstance, error) { - return getAllDBInstancesWithFilters(ctx, rdsClient, maxPages, auroraEngines(), rdsClusterIDFilter(dbClusterIdentifier), log) +func findDBInstancesForDBCluster(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, dbClusterIdentifier string, logger 
*slog.Logger) ([]*rds.DBInstance, error) { + return getAllDBInstancesWithFilters(ctx, rdsClient, maxPages, auroraEngines(), rdsClusterIDFilter(dbClusterIdentifier), logger) } // getAllDBInstancesWithFilters fetches all RDS instances matching the filters using the provided client, up // to the specified max number of pages. -func getAllDBInstancesWithFilters(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, engines []string, baseFilters []*rds.Filter, log logrus.FieldLogger) ([]*rds.DBInstance, error) { +func getAllDBInstancesWithFilters(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, engines []string, baseFilters []*rds.Filter, logger *slog.Logger) ([]*rds.DBInstance, error) { var instances []*rds.DBInstance - err := retryWithIndividualEngineFilters(log, engines, func(engineFilters []*rds.Filter) error { + err := retryWithIndividualEngineFilters(ctx, logger, engines, func(engineFilters []*rds.Filter) error { var pageNum int var out []*rds.DBInstance err := rdsClient.DescribeDBInstancesPagesWithContext(ctx, &rds.DescribeDBInstancesInput{ @@ -144,37 +148,43 @@ func (f *rdsAuroraClustersPlugin) GetDatabases(ctx context.Context, cfg *awsFetc if err != nil { return nil, trace.Wrap(err) } - clusters, err := getAllDBClusters(ctx, rdsClient, maxAWSPages, cfg.Log) + clusters, err := getAllDBClusters(ctx, rdsClient, maxAWSPages, cfg.Logger) if err != nil { return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } databases := make(types.Databases, 0, len(clusters)) for _, cluster := range clusters { if !libcloudaws.IsRDSClusterSupported(cluster) { - cfg.Log.Debugf("Aurora cluster %q (engine mode %v, engine version %v) doesn't support IAM authentication. 
Skipping.", - aws.StringValue(cluster.DBClusterIdentifier), - aws.StringValue(cluster.EngineMode), - aws.StringValue(cluster.EngineVersion)) + cfg.Logger.DebugContext(ctx, "Skipping Aurora cluster that does not support IAM authentication", + "cluster", aws.StringValue(cluster.DBClusterIdentifier), + "engine_mode", aws.StringValue(cluster.EngineMode), + "engine_version", aws.StringValue(cluster.EngineVersion), + ) continue } if !libcloudaws.IsDBClusterAvailable(cluster.Status, cluster.DBClusterIdentifier) { - cfg.Log.Debugf("The current status of Aurora cluster %q is %q. Skipping.", - aws.StringValue(cluster.DBClusterIdentifier), - aws.StringValue(cluster.Status)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable Aurora cluster", + "instance", aws.StringValue(cluster.DBClusterIdentifier), + "status", aws.StringValue(cluster.Status), + ) continue } - rdsDBInstances, err := findDBInstancesForDBCluster(ctx, rdsClient, maxAWSPages, aws.StringValue(cluster.DBClusterIdentifier), cfg.Log) + rdsDBInstances, err := findDBInstancesForDBCluster(ctx, rdsClient, maxAWSPages, aws.StringValue(cluster.DBClusterIdentifier), cfg.Logger) if err != nil || len(rdsDBInstances) == 0 { - cfg.Log.Warnf("Could not fetch Member Instance for DB Cluster %q: %v.", - aws.StringValue(cluster.DBClusterIdentifier), err) + cfg.Logger.WarnContext(ctx, "Could not fetch Member Instance for DB Cluster", + "instance", aws.StringValue(cluster.DBClusterIdentifier), + "error", err, + ) } dbs, err := common.NewDatabasesFromRDSCluster(cluster, rdsDBInstances) if err != nil { - cfg.Log.Warnf("Could not convert RDS cluster %q to database resources: %v.", - aws.StringValue(cluster.DBClusterIdentifier), err) + cfg.Logger.WarnContext(ctx, "Could not convert RDS cluster to database resources", + "identifier", aws.StringValue(cluster.DBClusterIdentifier), + "error", err, + ) } databases = append(databases, dbs...) 
} @@ -183,9 +193,9 @@ func (f *rdsAuroraClustersPlugin) GetDatabases(ctx context.Context, cfg *awsFetc // getAllDBClusters fetches all RDS clusters using the provided client, up to // the specified max number of pages. -func getAllDBClusters(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, log logrus.FieldLogger) ([]*rds.DBCluster, error) { +func getAllDBClusters(ctx context.Context, rdsClient rdsiface.RDSAPI, maxPages int, logger *slog.Logger) ([]*rds.DBCluster, error) { var clusters []*rds.DBCluster - err := retryWithIndividualEngineFilters(log, auroraEngines(), func(filters []*rds.Filter) error { + err := retryWithIndividualEngineFilters(ctx, logger, auroraEngines(), func(filters []*rds.Filter) error { var pageNum int var out []*rds.DBCluster err := rdsClient.DescribeDBClustersPagesWithContext(ctx, &rds.DescribeDBClustersInput{ @@ -252,7 +262,7 @@ type rdsFilterFn func([]*rds.Filter) error // and if the error is an AWS unrecognized engine name error then it will retry once by calling the function with one filter // at a time. If any error other than an AWS unrecognized engine name error occurs, this function will return that error // without retrying, or skip retrying subsequent filters if it has already started to retry. -func retryWithIndividualEngineFilters(log logrus.FieldLogger, engines []string, fn rdsFilterFn) error { +func retryWithIndividualEngineFilters(ctx context.Context, logger *slog.Logger, engines []string, fn rdsFilterFn) error { err := fn(rdsEngineFilter(engines)) if err == nil { return nil @@ -260,7 +270,7 @@ func retryWithIndividualEngineFilters(log logrus.FieldLogger, engines []string, if !isUnrecognizedAWSEngineNameError(err) { return trace.Wrap(err) } - log.WithError(trace.Unwrap(err)).Debug("Teleport supports an engine which is unrecognized in this AWS region. Querying engine names individually.") + logger.DebugContext(ctx, "Teleport supports an engine which is unrecognized in this AWS region. 
Querying engine names individually.", "error", err) for _, engine := range engines { err := fn(rdsEngineFilter([]string{engine})) if err == nil { diff --git a/lib/srv/discovery/fetchers/db/aws_rds_proxy.go b/lib/srv/discovery/fetchers/db/aws_rds_proxy.go index c7e0e986ae361..dde1a1a189940 100644 --- a/lib/srv/discovery/fetchers/db/aws_rds_proxy.go +++ b/lib/srv/discovery/fetchers/db/aws_rds_proxy.go @@ -65,20 +65,20 @@ func (f *rdsDBProxyPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConf // that owns the custom endpoints. customEndpointsByProxyName, err := getRDSProxyCustomEndpoints(ctx, rdsClient, maxAWSPages) if err != nil { - cfg.Log.Debugf("Failed to get RDS Proxy endpoints: %v.", err) + cfg.Logger.DebugContext(ctx, "Failed to get RDS Proxy endpoints", "error", err) } var databases types.Databases for _, dbProxy := range rdsProxies { if !aws.BoolValue(dbProxy.RequireTLS) { - cfg.Log.Debugf("RDS Proxy %q doesn't support TLS. Skipping.", aws.StringValue(dbProxy.DBProxyName)) + cfg.Logger.DebugContext(ctx, "Skipping RDS Proxy that doesn't support TLS", "rds_proxy", aws.StringValue(dbProxy.DBProxyName)) continue } if !libcloudaws.IsRDSProxyAvailable(dbProxy) { - cfg.Log.Debugf("The current status of RDS Proxy %q is %q. Skipping.", - aws.StringValue(dbProxy.DBProxyName), - aws.StringValue(dbProxy.Status)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable RDS Proxy", + "rds_proxy", aws.StringValue(dbProxy.DBProxyName), + "status", aws.StringValue(dbProxy.Status)) continue } @@ -86,14 +86,19 @@ func (f *rdsDBProxyPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConf // fetch the tags. If failed, keep going without the tags. 
tags, err := listRDSResourceTags(ctx, rdsClient, dbProxy.DBProxyArn) if err != nil { - cfg.Log.Debugf("Failed to get tags for RDS Proxy %v: %v.", aws.StringValue(dbProxy.DBProxyName), err) + cfg.Logger.DebugContext(ctx, "Failed to get tags for RDS Proxy", + "rds_proxy", aws.StringValue(dbProxy.DBProxyName), + "error", err, + ) } // Add a database from RDS Proxy (default endpoint). database, err := common.NewDatabaseFromRDSProxy(dbProxy, tags) if err != nil { - cfg.Log.Debugf("Could not convert RDS Proxy %q to database resource: %v.", - aws.StringValue(dbProxy.DBProxyName), err) + cfg.Logger.DebugContext(ctx, "Could not convert RDS Proxy to database resource", + "rds_proxy", aws.StringValue(dbProxy.DBProxyName), + "error", err, + ) } else { databases = append(databases, database) } @@ -101,19 +106,21 @@ func (f *rdsDBProxyPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConf // Add custom endpoints. for _, customEndpoint := range customEndpointsByProxyName[aws.StringValue(dbProxy.DBProxyName)] { if !libcloudaws.IsRDSProxyCustomEndpointAvailable(customEndpoint) { - cfg.Log.Debugf("The current status of custom endpoint %q of RDS Proxy %q is %q. 
Skipping.", - aws.StringValue(customEndpoint.DBProxyEndpointName), - aws.StringValue(customEndpoint.DBProxyName), - aws.StringValue(customEndpoint.Status)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable custom endpoint of RDS Proxy", + "endpoint", aws.StringValue(customEndpoint.DBProxyEndpointName), + "rds_proxy", aws.StringValue(customEndpoint.DBProxyName), + "status", aws.StringValue(customEndpoint.Status), + ) continue } database, err = common.NewDatabaseFromRDSProxyCustomEndpoint(dbProxy, customEndpoint, tags) if err != nil { - cfg.Log.Debugf("Could not convert custom endpoint %q of RDS Proxy %q to database resource: %v.", - aws.StringValue(customEndpoint.DBProxyEndpointName), - aws.StringValue(customEndpoint.DBProxyName), - err) + cfg.Logger.DebugContext(ctx, "Could not convert custom endpoint for RDS Proxy to database resource", + "endpoint", aws.StringValue(customEndpoint.DBProxyEndpointName), + "rds_proxy", aws.StringValue(customEndpoint.DBProxyName), + "error", err, + ) continue } databases = append(databases, database) diff --git a/lib/srv/discovery/fetchers/db/aws_redshift.go b/lib/srv/discovery/fetchers/db/aws_redshift.go index af7674fef4a25..7b4b1bfb35315 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift.go @@ -57,16 +57,19 @@ func (f *redshiftPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConfig var databases types.Databases for _, cluster := range clusters { if !libcloudaws.IsRedshiftClusterAvailable(cluster) { - cfg.Log.Debugf("The current status of Redshift cluster %q is %q. 
Skipping.", - aws.StringValue(cluster.ClusterIdentifier), - aws.StringValue(cluster.ClusterStatus)) + cfg.Logger.DebugContext(ctx, "Skipping unavailable Redshift cluster", + "cluster", aws.StringValue(cluster.ClusterIdentifier), + "status", aws.StringValue(cluster.ClusterStatus), + ) continue } database, err := common.NewDatabaseFromRedshiftCluster(cluster) if err != nil { - cfg.Log.Infof("Could not convert Redshift cluster %q to database resource: %v.", - aws.StringValue(cluster.ClusterIdentifier), err) + cfg.Logger.InfoContext(ctx, "Could not convert Redshift cluster to database resource", + "cluster", aws.StringValue(cluster.ClusterIdentifier), + "error", err, + ) continue } diff --git a/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go b/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go index ff0e64a2320f4..651034882b239 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go @@ -20,12 +20,12 @@ package db import ( "context" + "log/slog" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/redshiftserverless" "github.com/aws/aws-sdk-go/service/redshiftserverless/redshiftserverlessiface" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud" @@ -65,18 +65,18 @@ func (f *redshiftServerlessPlugin) GetDatabases(ctx context.Context, cfg *awsFet if err != nil { return nil, trace.Wrap(err) } - databases, workgroups, err := getDatabasesFromWorkgroups(ctx, client, cfg.Log) + databases, workgroups, err := getDatabasesFromWorkgroups(ctx, client, cfg.Logger) if err != nil { return nil, trace.Wrap(err) } if len(workgroups) > 0 { - vpcEndpointDatabases, err := getDatabasesFromVPCEndpoints(ctx, workgroups, client, cfg.Log) + vpcEndpointDatabases, err := getDatabasesFromVPCEndpoints(ctx, workgroups, client, cfg.Logger) if err != nil { if trace.IsAccessDenied(err) { - 
cfg.Log.Debugf("No permission to get Redshift Serverless VPC endpoints: %v.", err) + cfg.Logger.DebugContext(ctx, "No permission to get Redshift Serverless VPC endpoints", "error", err) } else { - cfg.Log.Warnf("Failed to get Redshift Serverless VPC endpoints: %v.", err) + cfg.Logger.WarnContext(ctx, "Failed to get Redshift Serverless VPC endpoints", "error", err) } } @@ -85,7 +85,7 @@ func (f *redshiftServerlessPlugin) GetDatabases(ctx context.Context, cfg *awsFet return databases, nil } -func getDatabasesFromWorkgroups(ctx context.Context, client rssAPI, log logrus.FieldLogger) (types.Databases, []*workgroupWithTags, error) { +func getDatabasesFromWorkgroups(ctx context.Context, client rssAPI, logger *slog.Logger) (types.Databases, []*workgroupWithTags, error) { workgroups, err := getRSSWorkgroups(ctx, client) if err != nil { return nil, nil, trace.Wrap(err) @@ -95,14 +95,20 @@ func getDatabasesFromWorkgroups(ctx context.Context, client rssAPI, log logrus.F var workgroupsWithTags []*workgroupWithTags for _, workgroup := range workgroups { if !libcloudaws.IsResourceAvailable(workgroup, workgroup.Status) { - log.Debugf("The current status of Redshift Serverless workgroup %v is %v. 
Skipping.", aws.StringValue(workgroup.WorkgroupName), aws.StringValue(workgroup.Status)) + logger.DebugContext(ctx, "Skipping unavailable Redshift Serverless workgroup", + "workgroup", aws.StringValue(workgroup.WorkgroupName), + "status", aws.StringValue(workgroup.Status), + ) continue } - tags := getRSSResourceTags(ctx, workgroup.WorkgroupArn, client, log) + tags := getRSSResourceTags(ctx, workgroup.WorkgroupArn, client, logger) database, err := common.NewDatabaseFromRedshiftServerlessWorkgroup(workgroup, tags) if err != nil { - log.WithError(err).Infof("Could not convert Redshift Serverless workgroup %q to database resource.", aws.StringValue(workgroup.WorkgroupName)) + logger.InfoContext(ctx, "Could not convert Redshift Serverless workgroup to database resource", + "workgroup", aws.StringValue(workgroup.WorkgroupName), + "error", err, + ) continue } @@ -115,7 +121,7 @@ func getDatabasesFromWorkgroups(ctx context.Context, client rssAPI, log logrus.F return databases, workgroupsWithTags, nil } -func getDatabasesFromVPCEndpoints(ctx context.Context, workgroups []*workgroupWithTags, client rssAPI, log logrus.FieldLogger) (types.Databases, error) { +func getDatabasesFromVPCEndpoints(ctx context.Context, workgroups []*workgroupWithTags, client rssAPI, logger *slog.Logger) (types.Databases, error) { endpoints, err := getRSSVPCEndpoints(ctx, client) if err != nil { return nil, trace.Wrap(err) @@ -125,12 +131,15 @@ func getDatabasesFromVPCEndpoints(ctx context.Context, workgroups []*workgroupWi for _, endpoint := range endpoints { workgroup, found := findWorkgroupWithName(workgroups, aws.StringValue(endpoint.WorkgroupName)) if !found { - log.Debugf("Could not find matching workgroup for Redshift Serverless endpoint %v. 
Skipping.", aws.StringValue(endpoint.EndpointName)) + logger.DebugContext(ctx, "Could not find matching workgroup for Redshift Serverless endpoint", "endpoint", aws.StringValue(endpoint.EndpointName)) continue } if !libcloudaws.IsResourceAvailable(endpoint, endpoint.EndpointStatus) { - log.Debugf("The current status of Redshift Serverless endpoint %v is %v. Skipping.", aws.StringValue(endpoint.EndpointName), aws.StringValue(endpoint.EndpointStatus)) + logger.DebugContext(ctx, "Skipping unavailable Redshift Serverless endpoint", + "endpoint", aws.StringValue(endpoint.EndpointName), + "status", aws.StringValue(endpoint.EndpointStatus), + ) continue } @@ -138,7 +147,10 @@ func getDatabasesFromVPCEndpoints(ctx context.Context, workgroups []*workgroupWi // tags from the workgroups instead. database, err := common.NewDatabaseFromRedshiftServerlessVPCEndpoint(endpoint, workgroup.Workgroup, workgroup.Tags) if err != nil { - log.WithError(err).Infof("Could not convert Redshift Serverless endpoint %q to database resource.", aws.StringValue(endpoint.EndpointName)) + logger.InfoContext(ctx, "Could not convert Redshift Serverless endpoint to database resource", + "endpoint", aws.StringValue(endpoint.EndpointName), + "error", err, + ) continue } databases = append(databases, database) @@ -146,16 +158,22 @@ func getDatabasesFromVPCEndpoints(ctx context.Context, workgroups []*workgroupWi return databases, nil } -func getRSSResourceTags(ctx context.Context, arn *string, client rssAPI, log logrus.FieldLogger) []*redshiftserverless.Tag { +func getRSSResourceTags(ctx context.Context, arn *string, client rssAPI, logger *slog.Logger) []*redshiftserverless.Tag { output, err := client.ListTagsForResourceWithContext(ctx, &redshiftserverless.ListTagsForResourceInput{ ResourceArn: arn, }) if err != nil { // Log errors here and return nil. 
if trace.IsAccessDenied(err) { - log.WithError(err).Debugf("No Permission to get tags for %q.", aws.StringValue(arn)) + logger.DebugContext(ctx, "No Permission to get Redshift Serverless tags", + "arn", aws.StringValue(arn), + "error", err, + ) } else { - log.WithError(err).Warnf("Failed to get tags for %q.", aws.StringValue(arn)) + logger.WarnContext(ctx, "Failed to get Redshift Serverless tags", + "arn", aws.StringValue(arn), + "error", err, + ) } return nil } diff --git a/lib/srv/discovery/fetchers/db/azure.go b/lib/srv/discovery/fetchers/db/azure.go index a0a8a600760b3..a1674015af2e2 100644 --- a/lib/srv/discovery/fetchers/db/azure.go +++ b/lib/srv/discovery/fetchers/db/azure.go @@ -21,9 +21,9 @@ package db import ( "context" "fmt" + "log/slog" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -50,7 +50,7 @@ type azureFetcherPlugin[DBType comparable, ListClient azureListClient[DBType]] i // GetServerLocation returns the server location. GetServerLocation(server DBType) string // NewDatabaseFromServer creates a types.Database from provided server. - NewDatabaseFromServer(server DBType, log logrus.FieldLogger) types.Database + NewDatabaseFromServer(ctx context.Context, server DBType, logger *slog.Logger) types.Database } // newAzureFetcher returns a Azure DB server fetcher for the provided subscription, group, regions, and tags. 
@@ -61,14 +61,14 @@ func newAzureFetcher[DBType comparable, ListClient azureListClient[DBType]](conf fetcher := &azureFetcher[DBType, ListClient]{ cfg: config, - log: logrus.WithFields(logrus.Fields{ - teleport.ComponentKey: "watch:azure", - "labels": config.Labels, - "regions": config.Regions, - "group": config.ResourceGroup, - "subscription": config.Subscription, - "type": config.Type, - }), + logger: slog.With( + teleport.ComponentKey, "watch:azure", + "labels", config.Labels, + "regions", config.Regions, + "group", config.ResourceGroup, + "subscription", config.Subscription, + "type", config.Type, + ), azureFetcherPlugin: plugin, } return fetcher, nil @@ -133,8 +133,8 @@ func (c *azureFetcherConfig) CheckAndSetDefaults() error { type azureFetcher[DBType comparable, ListClient azureListClient[DBType]] struct { azureFetcherPlugin[DBType, ListClient] - cfg azureFetcherConfig - log logrus.FieldLogger + cfg azureFetcherConfig + logger *slog.Logger } // Cloud returns the cloud the fetcher is operating. @@ -224,7 +224,7 @@ func (f *azureFetcher[DBType, ListClient]) getAllDBServers(ctx context.Context) servers, err := f.getDBServersInSubscription(ctx, subID) if err != nil { if trace.IsAccessDenied(err) || trace.IsNotFound(err) { - f.log.WithError(err).Debugf("Skipping subscription %q", subID) + f.logger.DebugContext(ctx, "Skipping subscription", "subscription", subID, "error", err) continue } return nil, trace.Wrap(err) @@ -252,11 +252,11 @@ func (f *azureFetcher[DBType, ListClient]) getDatabases(ctx context.Context) (ty continue } - if database := f.NewDatabaseFromServer(server, f.log); database != nil { + if database := f.NewDatabaseFromServer(ctx, server, f.logger); database != nil { databases = append(databases, database) } } - return filterDatabasesByLabels(databases, f.cfg.Labels, f.log), nil + return filterDatabasesByLabels(ctx, databases, f.cfg.Labels, f.logger), nil } // String returns the fetcher's string description. 
diff --git a/lib/srv/discovery/fetchers/db/azure_dbserver.go b/lib/srv/discovery/fetchers/db/azure_dbserver.go index eb752437676fc..60d2b35b3feb4 100644 --- a/lib/srv/discovery/fetchers/db/azure_dbserver.go +++ b/lib/srv/discovery/fetchers/db/azure_dbserver.go @@ -19,8 +19,10 @@ package db import ( + "context" + "log/slog" + "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -57,26 +59,28 @@ func (p *azureDBServerPlugin) GetServerLocation(server *azure.DBServer) string { return server.Location } -func (p *azureDBServerPlugin) NewDatabaseFromServer(server *azure.DBServer, log logrus.FieldLogger) types.Database { +func (p *azureDBServerPlugin) NewDatabaseFromServer(ctx context.Context, server *azure.DBServer, logger *slog.Logger) types.Database { if !server.IsSupported() { - log.Debugf("Azure server %q (version %v) does not support AAD authentication. Skipping.", - server.Name, - server.Properties.Version) + logger.DebugContext(ctx, "Skipping Azure server that does not support AAD authentication", + "server", server.Name, + "version", server.Properties.Version, + ) return nil } if !server.IsAvailable() { - log.Debugf("The current status of Azure server %q is %q. 
Skipping.", - server.Name, - server.Properties.UserVisibleState) + logger.DebugContext(ctx, "Skippin unavailable Azure server", + "server", server.Name, + "state", server.Properties.UserVisibleState) return nil } database, err := common.NewDatabaseFromAzureServer(server) if err != nil { - log.Warnf("Could not convert Azure server %q to database resource: %v.", - server.Name, - err) + logger.WarnContext(ctx, "Could not convert Azure server to database resource", + "server", server.Name, + "error", err, + ) return nil } return database diff --git a/lib/srv/discovery/fetchers/db/azure_managed_sql.go b/lib/srv/discovery/fetchers/db/azure_managed_sql.go index 02ca949e38dc7..4fa3aa7e4bc0a 100644 --- a/lib/srv/discovery/fetchers/db/azure_managed_sql.go +++ b/lib/srv/discovery/fetchers/db/azure_managed_sql.go @@ -19,9 +19,11 @@ package db import ( + "context" + "log/slog" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -46,17 +48,20 @@ func (f *azureManagedSQLServerFetcher) GetServerLocation(server *armsql.ManagedI return azure.StringVal(server.Location) } -func (f *azureManagedSQLServerFetcher) NewDatabaseFromServer(server *armsql.ManagedInstance, log logrus.FieldLogger) types.Database { +func (f *azureManagedSQLServerFetcher) NewDatabaseFromServer(ctx context.Context, server *armsql.ManagedInstance, logger *slog.Logger) types.Database { if !f.isAvailable(server) { - log.Debugf("The current status of Azure Managed SQL server %q is %q. 
Skipping.", - azure.StringVal(server.Name), - azure.StringVal(server.Properties.ProvisioningState)) + logger.DebugContext(ctx, "Skipping unavailable Azure Managed SQL server", + "server", azure.StringVal(server.Name), + "provisioning_state", azure.StringVal(server.Properties.ProvisioningState)) return nil } database, err := common.NewDatabaseFromAzureManagedSQLServer(server) if err != nil { - log.Warnf("Could not convert Azure Managed SQL server %q to database resource: %v.", azure.StringVal(server.Name), err) + logger.WarnContext(ctx, "Could not convert Azure Managed SQL server to database resource", + "server", azure.StringVal(server.Name), + "error", err, + ) return nil } @@ -85,9 +90,9 @@ func (f *azureManagedSQLServerFetcher) isAvailable(server *armsql.ManagedInstanc armsql.ManagedInstancePropertiesProvisioningStateUpdating: return true default: - logrus.Warnf("Unknown status type: %q. Assuming Managed SQL Server %q is available.", - azure.StringVal(server.Properties.ProvisioningState), - azure.StringVal(server.Name), + slog.WarnContext(context.Background(), "Assuming Managed SQL Server with unknown status type is available", + "status", azure.StringVal(server.Properties.ProvisioningState), + "server", azure.StringVal(server.Name), ) return true } diff --git a/lib/srv/discovery/fetchers/db/azure_managed_sql_test.go b/lib/srv/discovery/fetchers/db/azure_managed_sql_test.go index 06dfab6a75527..1d4fee9670973 100644 --- a/lib/srv/discovery/fetchers/db/azure_managed_sql_test.go +++ b/lib/srv/discovery/fetchers/db/azure_managed_sql_test.go @@ -19,6 +19,7 @@ package db import ( + "context" "slices" "testing" @@ -30,7 +31,7 @@ import ( ) func TestSQLManagedServerFetcher(t *testing.T) { - logger := utils.NewLoggerForTests() + logger := utils.NewSlogLoggerForTests() fetcher := &azureManagedSQLServerFetcher{} t.Run("NewDatabaseFromServer", func(t *testing.T) { @@ -46,7 +47,7 @@ func TestSQLManagedServerFetcher(t *testing.T) { // For available states, it should return a 
parsed database. for _, state := range availableStates { t.Run(string(state), func(t *testing.T) { - require.NotNil(t, fetcher.NewDatabaseFromServer(makeManagedSQLInstance(state), logger), "expected to have a database, but got nil") + require.NotNil(t, fetcher.NewDatabaseFromServer(context.Background(), makeManagedSQLInstance(state), logger), "expected to have a database, but got nil") }) } @@ -58,13 +59,14 @@ func TestSQLManagedServerFetcher(t *testing.T) { } t.Run(string(state), func(t *testing.T) { - require.Nil(t, fetcher.NewDatabaseFromServer(makeManagedSQLInstance(state), logger), "expected to have nil, but got a database") + require.Nil(t, fetcher.NewDatabaseFromServer(context.Background(), makeManagedSQLInstance(state), logger), "expected to have nil, but got a database") }) } t.Run("RandomState", func(t *testing.T) { require.NotNil(t, fetcher.NewDatabaseFromServer( + context.Background(), makeManagedSQLInstance("RandomState"), logger, ), diff --git a/lib/srv/discovery/fetchers/db/azure_mysql_flex.go b/lib/srv/discovery/fetchers/db/azure_mysql_flex.go index 45586295e7e7c..84dfdbe5201ab 100644 --- a/lib/srv/discovery/fetchers/db/azure_mysql_flex.go +++ b/lib/srv/discovery/fetchers/db/azure_mysql_flex.go @@ -19,9 +19,11 @@ package db import ( + "context" + "log/slog" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysqlflexibleservers" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -48,17 +50,21 @@ func (f *azureMySQLFlexServerFetcher) GetServerLocation(server *armmysqlflexible } // NewDatabaseFromServer converts an Azure MySQL Flexible server to a Teleport database. -func (f *azureMySQLFlexServerFetcher) NewDatabaseFromServer(server *armmysqlflexibleservers.Server, log logrus.FieldLogger) types.Database { - if !f.isAvailable(server, log) { - log.Debugf("The current status of Azure MySQL Flexible server %q is %q. 
Skipping.", - azure.StringVal(server.Name), - azure.StringVal(server.Properties.State)) +func (f *azureMySQLFlexServerFetcher) NewDatabaseFromServer(ctx context.Context, server *armmysqlflexibleservers.Server, logger *slog.Logger) types.Database { + if !f.isAvailable(server, logger) { + logger.DebugContext(ctx, "Skipping unavailable Azure MySQL Flexible server", + "server", azure.StringVal(server.Name), + "state", azure.StringVal(server.Properties.State), + ) return nil } database, err := common.NewDatabaseFromAzureMySQLFlexServer(server) if err != nil { - log.Warnf("Could not convert Azure MySQL server %q to database resource: %v.", azure.StringVal(server.Name), err) + logger.WarnContext(ctx, "Could not convert Azure MySQL server to database resource", + "server", azure.StringVal(server.Name), + "error", err, + ) return nil } return database @@ -66,7 +72,7 @@ func (f *azureMySQLFlexServerFetcher) NewDatabaseFromServer(server *armmysqlflex // isAvailable checks the status of the server and returns true if the server // is available. -func (f *azureMySQLFlexServerFetcher) isAvailable(server *armmysqlflexibleservers.Server, log logrus.FieldLogger) bool { +func (f *azureMySQLFlexServerFetcher) isAvailable(server *armmysqlflexibleservers.Server, logger *slog.Logger) bool { state := armmysqlflexibleservers.ServerState(azure.StringVal(server.Properties.State)) switch state { case armmysqlflexibleservers.ServerStateReady, armmysqlflexibleservers.ServerStateUpdating: @@ -79,8 +85,9 @@ func (f *azureMySQLFlexServerFetcher) isAvailable(server *armmysqlflexibleserver // server state is known and it's not available. return false } - log.Warnf("Unknown status type: %q. 
Assuming Azure MySQL Flexible server %q is available.", - state, - azure.StringVal(server.Name)) + logger.WarnContext(context.Background(), "Assuming Azure MySQL Flexible server with unknown status type is available", + "status", state, + "server", azure.StringVal(server.Name), + ) return true } diff --git a/lib/srv/discovery/fetchers/db/azure_postgres_flex.go b/lib/srv/discovery/fetchers/db/azure_postgres_flex.go index a31306f1d6206..97143b494f397 100644 --- a/lib/srv/discovery/fetchers/db/azure_postgres_flex.go +++ b/lib/srv/discovery/fetchers/db/azure_postgres_flex.go @@ -19,9 +19,11 @@ package db import ( + "context" + "log/slog" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -48,9 +50,9 @@ func (f *azurePostgresFlexServerFetcher) GetServerLocation(server *armpostgresql } // NewDatabaseFromServer converts an Azure PostgreSQL server to a Teleport database. -func (f *azurePostgresFlexServerFetcher) NewDatabaseFromServer(server *armpostgresqlflexibleservers.Server, log logrus.FieldLogger) types.Database { - if !f.isAvailable(server, log) { - log.Debugf("The current status of Azure PostgreSQL Flexible server %q is %q. 
Skipping.", +func (f *azurePostgresFlexServerFetcher) NewDatabaseFromServer(ctx context.Context, server *armpostgresqlflexibleservers.Server, logger *slog.Logger) types.Database { + if !f.isAvailable(server, logger) { + logger.DebugContext(ctx, "Skipping unavailable Azure PostgreSQL Flexible server", azure.StringVal(server.Name), azure.StringVal(server.Properties.State)) return nil @@ -58,7 +60,10 @@ func (f *azurePostgresFlexServerFetcher) NewDatabaseFromServer(server *armpostgr database, err := common.NewDatabaseFromAzurePostgresFlexServer(server) if err != nil { - log.Warnf("Could not convert Azure PostgreSQL server %q to database resource: %v.", azure.StringVal(server.Name), err) + logger.WarnContext(ctx, "Could not convert Azure PostgreSQL server to database resource", + "server", azure.StringVal(server.Name), + "error", err, + ) return nil } return database @@ -66,7 +71,7 @@ func (f *azurePostgresFlexServerFetcher) NewDatabaseFromServer(server *armpostgr // isAvailable checks the status of the server and returns true if the server // is available. -func (f *azurePostgresFlexServerFetcher) isAvailable(server *armpostgresqlflexibleservers.Server, log logrus.FieldLogger) bool { +func (f *azurePostgresFlexServerFetcher) isAvailable(server *armpostgresqlflexibleservers.Server, logger *slog.Logger) bool { state := armpostgresqlflexibleservers.ServerState(azure.StringVal(server.Properties.State)) switch state { case armpostgresqlflexibleservers.ServerStateReady, armpostgresqlflexibleservers.ServerStateUpdating: @@ -79,8 +84,9 @@ func (f *azurePostgresFlexServerFetcher) isAvailable(server *armpostgresqlflexib // server state is known and it's not available. return false } - log.Warnf("Unknown status type: %q. 
Assuming Azure PostgreSQL Flexible server %q is available.", - state, - azure.StringVal(server.Name)) + logger.WarnContext(context.Background(), "Assuming Azure PostgreSQL Flexible server with unknown status is available", + "status", state, + "server", azure.StringVal(server.Name), + ) return true } diff --git a/lib/srv/discovery/fetchers/db/azure_redis.go b/lib/srv/discovery/fetchers/db/azure_redis.go index 56e24524a558d..a04879db42da7 100644 --- a/lib/srv/discovery/fetchers/db/azure_redis.go +++ b/lib/srv/discovery/fetchers/db/azure_redis.go @@ -19,9 +19,11 @@ package db import ( + "context" + "log/slog" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redis/armredis/v3" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -45,22 +47,26 @@ func (p *azureRedisPlugin) GetServerLocation(server *armredis.ResourceInfo) stri return azure.StringVal(server.Location) } -func (p *azureRedisPlugin) NewDatabaseFromServer(server *armredis.ResourceInfo, log logrus.FieldLogger) types.Database { +func (p *azureRedisPlugin) NewDatabaseFromServer(ctx context.Context, server *armredis.ResourceInfo, logger *slog.Logger) types.Database { if server.Properties.SSLPort == nil { // should never happen, but checking just in case. - log.Debugf("Azure Redis server %v is missing SSL port. Skipping.", azure.StringVal(server.Name)) + logger.DebugContext(ctx, "Skipping Azure Redis server with missing SSL port", "server", azure.StringVal(server.Name)) return nil } if !p.isAvailable(server) { - log.Debugf("The current status of Azure Redis server %q is %q. 
Skipping.", - azure.StringVal(server.Name), - azure.StringVal(server.Properties.ProvisioningState)) + logger.DebugContext(ctx, "Skipping unavailable Azure Redis server", + "server", azure.StringVal(server.Name), + "status", azure.StringVal(server.Properties.ProvisioningState), + ) return nil } database, err := common.NewDatabaseFromAzureRedis(server) if err != nil { - log.Warnf("Could not convert Azure Redis server %q to database resource: %v.", azure.StringVal(server.Name), err) + logger.WarnContext(ctx, "Could not convert Azure Redis server to database resource", + "server", azure.StringVal(server.Name), + "error", err, + ) return nil } return database @@ -85,9 +91,9 @@ func (p *azureRedisPlugin) isAvailable(server *armredis.ResourceInfo) bool { armredis.ProvisioningStateUnprovisioning: return false default: - logrus.Warnf("Unknown status type: %q. Assuming Azure Redis %q is available.", - azure.StringVal(server.Properties.ProvisioningState), - azure.StringVal(server.Name), + slog.WarnContext(context.Background(), "Assuming Azure Redis with unknown status type is available", + "status", azure.StringVal(server.Properties.ProvisioningState), + "server", azure.StringVal(server.Name), ) return true } diff --git a/lib/srv/discovery/fetchers/db/azure_redis_enterprise.go b/lib/srv/discovery/fetchers/db/azure_redis_enterprise.go index f3c6770b8ee4e..61a84580483e3 100644 --- a/lib/srv/discovery/fetchers/db/azure_redis_enterprise.go +++ b/lib/srv/discovery/fetchers/db/azure_redis_enterprise.go @@ -19,9 +19,11 @@ package db import ( + "context" + "log/slog" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redisenterprise/armredisenterprise" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -44,32 +46,32 @@ func (p *azureRedisEnterprisePlugin) GetServerLocation(server *azure.RedisEnterp return azure.StringVal(server.Cluster.Location) } -func (p 
*azureRedisEnterprisePlugin) NewDatabaseFromServer(server *azure.RedisEnterpriseDatabase, log logrus.FieldLogger) types.Database { +func (p *azureRedisEnterprisePlugin) NewDatabaseFromServer(ctx context.Context, server *azure.RedisEnterpriseDatabase, logger *slog.Logger) types.Database { if server.Properties == nil || server.Cluster.Properties == nil { return nil } if azure.StringVal(server.Properties.ClientProtocol) != string(armredisenterprise.ProtocolEncrypted) { - log.Debugf("Azure Redis Enterprise %v is running unsupported protocol %v. Skipping.", - server, - azure.StringVal(server.Properties.ClientProtocol), + logger.DebugContext(ctx, "Skipping Azure Redis Enterprise with unsupported protocol", + "server", server, + "protocol", azure.StringVal(server.Properties.ClientProtocol), ) return nil } if !p.isAvailable(server) { - log.Debugf("The current status of Azure Redis Enterprise %v is %q. Skipping.", - server, - azure.StringVal(server.Properties.ProvisioningState), + logger.DebugContext(ctx, "Skipping unavailable Azure Redis Enterprise server", + "server", server, + "status", azure.StringVal(server.Properties.ProvisioningState), ) return nil } database, err := common.NewDatabaseFromAzureRedisEnterprise(server.Cluster, server.Database) if err != nil { - log.Warnf("Could not convert Azure Redis Enterprise %v to database resource: %v.", - server, - err, + logger.WarnContext(ctx, "Could not convert Azure Redis Enterprise to database resource", + "server", server, + "error", err, ) return nil } @@ -90,9 +92,9 @@ func (p *azureRedisEnterprisePlugin) isAvailable(server *azure.RedisEnterpriseDa armredisenterprise.ProvisioningStateFailed: return false default: - logrus.Warnf("Unknown status type: %q. 
Assuming Azure Enterprise Redis %v is available.", - azure.StringVal(server.Properties.ProvisioningState), - server, + slog.WarnContext(context.Background(), "Assuming Azure Enterprise Redis with unknown status type is available", + "status", azure.StringVal(server.Properties.ProvisioningState), + "server", server, ) return true } diff --git a/lib/srv/discovery/fetchers/db/azure_sql.go b/lib/srv/discovery/fetchers/db/azure_sql.go index 5d25da6599073..eebda757556cf 100644 --- a/lib/srv/discovery/fetchers/db/azure_sql.go +++ b/lib/srv/discovery/fetchers/db/azure_sql.go @@ -19,9 +19,11 @@ package db import ( + "context" + "log/slog" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/azure" @@ -45,10 +47,13 @@ func (f *azureSQLServerFetcher) GetServerLocation(server *armsql.Server) string return azure.StringVal(server.Location) } -func (f *azureSQLServerFetcher) NewDatabaseFromServer(server *armsql.Server, log logrus.FieldLogger) types.Database { +func (f *azureSQLServerFetcher) NewDatabaseFromServer(ctx context.Context, server *armsql.Server, logger *slog.Logger) types.Database { database, err := common.NewDatabaseFromAzureSQLServer(server) if err != nil { - log.Warnf("Could not convert Azure SQL server %q to database resource: %v.", azure.StringVal(server.Name), err) + logger.WarnContext(ctx, "Could not convert Azure SQL server to database resource", + "server", azure.StringVal(server.Name), + "error", err, + ) return nil } diff --git a/lib/srv/discovery/fetchers/db/db.go b/lib/srv/discovery/fetchers/db/db.go index d1307a1e97444..c3c3cbeec6dbe 100644 --- a/lib/srv/discovery/fetchers/db/db.go +++ b/lib/srv/discovery/fetchers/db/db.go @@ -20,9 +20,9 @@ package db import ( "context" + "log/slog" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "golang.org/x/exp/maps" 
"github.com/gravitational/teleport/api/types" @@ -133,16 +133,16 @@ func MakeAzureFetchers(clients cloud.AzureClients, matchers []types.AzureMatcher } // filterDatabasesByLabels filters input databases with provided labels. -func filterDatabasesByLabels(databases types.Databases, labels types.Labels, log logrus.FieldLogger) types.Databases { +func filterDatabasesByLabels(ctx context.Context, databases types.Databases, labels types.Labels, logger *slog.Logger) types.Databases { var matchedDatabases types.Databases for _, database := range databases { match, _, err := services.MatchLabels(labels, database.GetAllLabels()) if err != nil { - log.Warnf("Failed to match %v against selector: %v.", database, err) + logger.WarnContext(ctx, "Failed to match database gainst selector", "database", database, "error", err) } else if match { matchedDatabases = append(matchedDatabases, database) } else { - log.Debugf("%v doesn't match selector.", database) + logger.DebugContext(ctx, "database doesn't match selector", "database", database) } } return matchedDatabases diff --git a/lib/srv/discovery/fetchers/eks.go b/lib/srv/discovery/fetchers/eks.go index eb02b838804e9..193244bba75e3 100644 --- a/lib/srv/discovery/fetchers/eks.go +++ b/lib/srv/discovery/fetchers/eks.go @@ -22,6 +22,7 @@ import ( "context" "encoding/base64" "fmt" + "log/slog" "path" "slices" "strings" @@ -37,7 +38,6 @@ import ( "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" rbacv1 "k8s.io/api/rbac/v1" kubeerrors "k8s.io/apimachinery/pkg/api/errors" @@ -100,7 +100,7 @@ type EKSFetcherConfig struct { // FilterLabels are the filter criteria. FilterLabels types.Labels // Log is the logger. - Log logrus.FieldLogger + Logger *slog.Logger // SetupAccessForARN is the ARN to setup access for. SetupAccessForARN string // Clock is the clock. 
@@ -120,8 +120,8 @@ func (c *EKSFetcherConfig) CheckAndSetDefaults() error { return trace.BadParameter("missing FilterLabels field") } - if c.Log == nil { - c.Log = logrus.WithField(teleport.ComponentKey, "fetcher:eks") + if c.Logger == nil { + c.Logger = slog.With(teleport.ComponentKey, "fetcher:eks") } if c.Clock == nil { @@ -133,7 +133,7 @@ func (c *EKSFetcherConfig) CheckAndSetDefaults() error { // MakeEKSFetchersFromAWSMatchers creates fetchers from the provided matchers. Returned fetchers are separated // by their reliance on the integration. -func MakeEKSFetchersFromAWSMatchers(log logrus.FieldLogger, clients cloud.AWSClients, matchers []types.AWSMatcher, discoveryConfigName string) (kubeFetchers []common.Fetcher, _ error) { +func MakeEKSFetchersFromAWSMatchers(logger *slog.Logger, clients cloud.AWSClients, matchers []types.AWSMatcher, discoveryConfigName string) (kubeFetchers []common.Fetcher, _ error) { for _, matcher := range matchers { var matcherAssumeRole types.AssumeRole if matcher.AssumeRole != nil { @@ -152,13 +152,17 @@ func MakeEKSFetchersFromAWSMatchers(log logrus.FieldLogger, clients cloud.AWSCli Integration: matcher.Integration, KubeAppDiscovery: matcher.KubeAppDiscovery, FilterLabels: matcher.Tags, - Log: log, + Logger: logger, SetupAccessForARN: matcher.SetupAccessForARN, DiscoveryConfigName: discoveryConfigName, }, ) if err != nil { - log.WithError(err).Warnf("Could not initialize EKS fetcher(Region=%q, Labels=%q, AssumeRole=%q), skipping.", region, matcher.Tags, matcherAssumeRole.RoleARN) + logger.WarnContext(context.Background(), "Could not initialize EKS fetcher, skipping", + "error", err, + "region", region, + "labels", matcher.Tags, + "assume_role", matcherAssumeRole.RoleARN) continue } kubeFetchers = append(kubeFetchers, fetcher) @@ -178,7 +182,7 @@ func NewEKSFetcher(cfg EKSFetcherConfig) (common.Fetcher, error) { fetcher := &eksFetcher{EKSFetcherConfig: cfg} if err := fetcher.setCallerIdentity(context.Background()); err != nil { - 
cfg.Log.WithError(err).Warn("Failed to set caller identity.") + cfg.Logger.WarnContext(context.Background(), "Failed to set caller identity", "error", err) } // If the fetcher SetupAccessForARN isn't set, use the caller identity. @@ -290,11 +294,11 @@ func (a *eksFetcher) getEKSClusters(ctx context.Context) (types.KubeClusters, er // trace.CompareFailed is returned if the cluster did not match the matcher filtering labels // or if the cluster is not yet active. if trace.IsCompareFailed(err) { - a.Log.WithError(err).Debugf("Cluster %q did not match the filtering criteria.", clusterName) + a.Logger.DebugContext(groupCtx, "Cluster did not match the filtering criteria", "error", err, "cluster", clusterName) // never return an error otherwise we will impact discovery process return nil } else if err != nil { - a.Log.WithError(err).Warnf("Failed to discover EKS cluster %q.", clusterName) + a.Logger.WarnContext(groupCtx, "Failed to discover EKS cluster", "error", err, "cluster", clusterName) // never return an error otherwise we will impact discovery process return nil } @@ -360,7 +364,7 @@ func (a *eksFetcher) getMatchingKubeCluster(ctx context.Context, clusterName str switch st := aws.StringValue(rsp.Cluster.Status); st { case eks.ClusterStatusUpdating, eks.ClusterStatusActive: - a.Log.WithField("cluster_name", clusterName).Debugf("EKS cluster status is valid: %s", st) + a.Logger.DebugContext(ctx, "EKS cluster status is valid", "status", st, "cluster", clusterName) default: return nil, trace.CompareFailed("EKS cluster %q not enrolled due to its current status: %s", clusterName, st) } @@ -391,8 +395,11 @@ func (a *eksFetcher) getMatchingKubeCluster(ctx context.Context, clusterName str } return cluster, nil default: - a.Log.Infof("EKS cluster %q does not support access bootstrap due to its authentication mode %q. Skipping access setup. 
Access for ARN %q must be manually configured.", - clusterName, st, a.SetupAccessForARN) + a.Logger.InfoContext(ctx, "EKS cluster must be configured manually due to its authentication mode", + "cluster", clusterName, + "authentication_mode", st, + "access_arn", a.SetupAccessForARN, + ) return cluster, nil } } @@ -433,9 +440,11 @@ func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksifa switch { case trace.IsAccessDenied(err): // Access denied means that the principal does not have access to setup access entries for the cluster. - a.Log.WithError(err).Warnf("Access denied to setup access for EKS cluster %q. Please ensure you correctly configured the following permissions: %v", - aws.StringValue(cluster.Name), - eksDiscoveryPermissions) + a.Logger.WarnContext(ctx, "Access denied to setup access for EKS cluster, ensure the required permissions are set", + "error", err, + "cluster", aws.StringValue(cluster.Name), + "required_permissions", eksDiscoveryPermissions, + ) return nil case err == nil: // If the access entry exists and the principal has access to the cluster, check if the teleportKubernetesGroup is part of the Kubernetes group. @@ -448,9 +457,11 @@ func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksifa // This temporary access is granted to the identity that the Discovery service fetcher is running as (callerIdentity). If a role is assumed, the callerIdentity is the assumed role. if err := a.temporarilyGainAdminAccessAndCreateRole(ctx, client, cluster); trace.IsAccessDenied(err) { // Access denied means that the principal does not have access to setup access entries for the cluster. - a.Log.WithError(err).Warnf("Access denied to setup access for EKS cluster %q. 
Please ensure you correctly configured the following permissions: %v", - aws.StringValue(cluster.Name), - eksDiscoveryPermissions) + a.Logger.WarnContext(ctx, "Access denied to setup access for EKS cluster, ensure the required permissions are set", + "error", err, + "cluster", aws.StringValue(cluster.Name), + "required_permissions", eksDiscoveryPermissions, + ) return nil } else if err != nil { return trace.Wrap(err, "unable to setup access for EKS cluster %q", aws.StringValue(cluster.Name)) @@ -460,9 +471,11 @@ func (a *eksFetcher) checkOrSetupAccessForARN(ctx context.Context, client eksifa err = a.upsertAccessEntry(ctx, client, cluster) if trace.IsAccessDenied(err) { // Access denied means that the principal does not have access to setup access entries for the cluster. - a.Log.WithError(err).Warnf("Access denied to setup access for EKS cluster %q. Please ensure you correctly configured the following permissions: %v", - aws.StringValue(cluster.Name), - eksDiscoveryPermissions) + a.Logger.WarnContext(ctx, "Access denied to setup access for EKS cluster, ensure the required permissions are set", + "error", err, + "cluster", aws.StringValue(cluster.Name), + "required_permissions", eksDiscoveryPermissions, + ) return nil } return trace.Wrap(err, "unable to setup access for EKS cluster %q", aws.StringValue(cluster.Name)) @@ -505,7 +518,10 @@ func (a *eksFetcher) temporarilyGainAdminAccessAndCreateRole(ctx context.Context }), ) if err != nil { - a.Log.WithError(err).Warnf("Failed to delete access entry for EKS cluster %q", aws.StringValue(cluster.Name)) + a.Logger.WarnContext(ctx, "Failed to delete access entry for EKS cluster", + "error", err, + "cluster", aws.StringValue(cluster.Name), + ) } }() diff --git a/lib/srv/discovery/fetchers/eks_test.go b/lib/srv/discovery/fetchers/eks_test.go index 84309922bbdae..d7b9c6b4cac47 100644 --- a/lib/srv/discovery/fetchers/eks_test.go +++ b/lib/srv/discovery/fetchers/eks_test.go @@ -29,12 +29,12 @@ import ( 
"github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/srv/discovery/common" + "github.com/gravitational/teleport/lib/utils" ) func TestEKSFetcher(t *testing.T) { @@ -106,7 +106,7 @@ func TestEKSFetcher(t *testing.T) { ClientGetter: &mockEKSClientGetter{}, FilterLabels: tt.args.filterLabels, Region: tt.args.region, - Log: logrus.New(), + Logger: utils.NewSlogLoggerForTests(), } fetcher, err := NewEKSFetcher(cfg) require.NoError(t, err) diff --git a/lib/srv/discovery/fetchers/gke.go b/lib/srv/discovery/fetchers/gke.go index 9a94a663c2a47..29ff143f8cfea 100644 --- a/lib/srv/discovery/fetchers/gke.go +++ b/lib/srv/discovery/fetchers/gke.go @@ -21,10 +21,10 @@ package fetchers import ( "context" "fmt" + "log/slog" containerpb "cloud.google.com/go/container/apiv1/containerpb" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -47,7 +47,7 @@ type GKEFetcherConfig struct { // FilterLabels are the filter criteria. FilterLabels types.Labels // Log is the logger. - Log logrus.FieldLogger + Logger *slog.Logger // DiscoveryConfigName is the name of the discovery config which originated the resource. 
DiscoveryConfigName string } @@ -68,8 +68,8 @@ func (c *GKEFetcherConfig) CheckAndSetDefaults() error { return trace.BadParameter("missing FilterLabels field") } - if c.Log == nil { - c.Log = logrus.WithField(teleport.ComponentKey, "fetcher:gke") + if c.Logger == nil { + c.Logger = slog.With(teleport.ComponentKey, "fetcher:gke") } return nil } @@ -96,7 +96,7 @@ func (a *gkeFetcher) Get(ctx context.Context) (types.ResourcesWithLabels, error) return nil, trace.Wrap(err) } - a.Log.Debugf("Fetching GKE clusters for project IDs: %v", projectIDs) + a.Logger.DebugContext(ctx, "Fetching GKE clusters for configured projects", "project_ids", projectIDs) var clusters types.KubeClusters for _, projectID := range projectIDs { lClusters, err := a.getGKEClusters(ctx, projectID) @@ -119,10 +119,10 @@ func (a *gkeFetcher) getGKEClusters(ctx context.Context, projectID string) (type // trace.CompareFailed is returned if the cluster did not match the matcher filtering labels // or if the cluster is not yet active. 
if trace.IsCompareFailed(err) { - a.Log.WithError(err).Debugf("Cluster %q did not match the filtering criteria.", gkeCluster.Name) + a.Logger.DebugContext(ctx, "Cluster did not match the filtering criteria", "error", err, "cluster", gkeCluster.Name) continue } else if err != nil { - a.Log.WithError(err).Warnf("Failed to discover GKE cluster %q.", gkeCluster.Name) + a.Logger.WarnContext(ctx, "Failed to discover GKE cluster", "error", err, "cluster", gkeCluster.Name) continue } clusters = append(clusters, cluster) diff --git a/lib/srv/discovery/fetchers/gke_test.go b/lib/srv/discovery/fetchers/gke_test.go index 53374682b179d..2fac7719d20f9 100644 --- a/lib/srv/discovery/fetchers/gke_test.go +++ b/lib/srv/discovery/fetchers/gke_test.go @@ -23,12 +23,12 @@ import ( "testing" containerpb "cloud.google.com/go/container/apiv1/containerpb" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/cloud/gcp" "github.com/gravitational/teleport/lib/srv/discovery/common" + "github.com/gravitational/teleport/lib/utils" ) func TestGKEFetcher(t *testing.T) { @@ -119,7 +119,7 @@ func TestGKEFetcher(t *testing.T) { FilterLabels: tt.args.filterLabels, Location: tt.args.location, ProjectID: tt.args.projectID, - Log: logrus.New(), + Logger: utils.NewSlogLoggerForTests(), } fetcher, err := NewGKEFetcher(context.Background(), cfg) require.NoError(t, err) diff --git a/lib/srv/discovery/fetchers/kube_services.go b/lib/srv/discovery/fetchers/kube_services.go index bc44a9c5cc153..36060b144ca35 100644 --- a/lib/srv/discovery/fetchers/kube_services.go +++ b/lib/srv/discovery/fetchers/kube_services.go @@ -22,6 +22,7 @@ import ( "context" "crypto/tls" "fmt" + "log/slog" "net/http" "slices" "strconv" @@ -30,7 +31,6 @@ import ( "time" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ 
-51,7 +51,7 @@ type KubeAppsFetcherConfig struct { // Namespaces are the kubernetes namespaces in which to discover services Namespaces []string // Log is a logger to use - Log logrus.FieldLogger + Logger *slog.Logger // ProtocolChecker inspects port to find your whether they are HTTP/HTTPS or not. ProtocolChecker ProtocolChecker // DiscoveryConfigName is the name of the discovery config which originated the resource. @@ -66,8 +66,8 @@ func (k *KubeAppsFetcherConfig) CheckAndSetDefaults() error { if k.KubernetesClient == nil { return trace.BadParameter("missing parameter KubernetesClient") } - if k.Log == nil { - return trace.BadParameter("missing parameter Log") + if k.Logger == nil { + return trace.BadParameter("missing parameter Logger") } if k.ClusterName == "" { return trace.BadParameter("missing parameter ClusterName") @@ -142,7 +142,7 @@ func (f *KubeAppFetcher) getServices(ctx context.Context, discoveryType string) } else if match { result = append(result, s) } else { - f.Log.WithField("service_name", s.Name).Debug("Service doesn't match labels.") + f.Logger.DebugContext(ctx, "Service doesn't match labels", "service", s.Name) } } nextToken = kubeServices.Continue @@ -184,7 +184,7 @@ func (f *KubeAppFetcher) Get(ctx context.Context) (types.ResourcesWithLabels, er ports, err := getServicePorts(service) if err != nil { - f.Log.WithError(err).Errorf("could not get ports for the service %q", service.GetName()) + f.Logger.ErrorContext(ctx, "could not get ports for the service", "error", err, "service", service.GetName()) return nil } @@ -207,7 +207,7 @@ func (f *KubeAppFetcher) Get(ctx context.Context) (types.ResourcesWithLabels, er } newApp, err := services.NewApplicationFromKubeService(service, f.ClusterName, portProtocol, port) if err != nil { - f.Log.WithError(err).Warnf("Could not get app from a Kubernetes service %q, port %d", service.GetName(), port.Port) + f.Logger.WarnContext(ctx, "Could not get app from a Kubernetes service", "error", err, "service", 
service.GetName(), "port", port.Port) return nil } newApps = append(newApps, newApp) diff --git a/lib/srv/discovery/fetchers/kube_services_test.go b/lib/srv/discovery/fetchers/kube_services_test.go index 9b139e35b151f..08eadf4d4292f 100644 --- a/lib/srv/discovery/fetchers/kube_services_test.go +++ b/lib/srv/discovery/fetchers/kube_services_test.go @@ -286,7 +286,7 @@ func TestKubeAppFetcher_Get(t *testing.T) { FilterLabels: tt.matcherLabels, Namespaces: tt.matcherNamespaces, ProtocolChecker: tt.protoChecker, - Log: utils.NewLogger(), + Logger: utils.NewSlogLoggerForTests(), }) require.NoError(t, err) diff --git a/lib/srv/discovery/kube_integration_watcher.go b/lib/srv/discovery/kube_integration_watcher.go index 3ecb000a8edad..859dd13a11949 100644 --- a/lib/srv/discovery/kube_integration_watcher.go +++ b/lib/srv/discovery/kube_integration_watcher.go @@ -71,7 +71,7 @@ func (s *Server) startKubeIntegrationWatchers() error { s.submitFetchersEvent(kubeIntegrationFetchers) return kubeIntegrationFetchers }, - Log: s.LegacyLogger.WithField("kind", types.KindKubernetesCluster), + Logger: s.Log.With("kind", types.KindKubernetesCluster), DiscoveryGroup: s.DiscoveryGroup, Interval: s.PollInterval, Origin: types.OriginCloud, diff --git a/lib/srv/discovery/kube_integration_watcher_test.go b/lib/srv/discovery/kube_integration_watcher_test.go index 566d587635ff6..bde45665b2a28 100644 --- a/lib/srv/discovery/kube_integration_watcher_test.go +++ b/lib/srv/discovery/kube_integration_watcher_test.go @@ -31,7 +31,6 @@ import ( eksV1 "github.com/aws/aws-sdk-go/service/eks" "github.com/google/uuid" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -382,7 +381,6 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { }, Emitter: authClient, Log: libutils.NewSlogLoggerForTests(), - LegacyLogger: logrus.New(), DiscoveryGroup: mainDiscoveryGroup, }) diff --git 
a/lib/srv/discovery/kube_services_watcher.go b/lib/srv/discovery/kube_services_watcher.go index f9fc7ad47a16e..8e63c6947242a 100644 --- a/lib/srv/discovery/kube_services_watcher.go +++ b/lib/srv/discovery/kube_services_watcher.go @@ -76,7 +76,7 @@ func (s *Server) startKubeAppsWatchers() error { watcher, err := common.NewWatcher(s.ctx, common.WatcherConfig{ FetchersFn: common.StaticFetchers(s.kubeAppsFetchers), Interval: 5 * time.Minute, - Log: s.LegacyLogger.WithField("kind", types.KindApp), + Logger: s.Log.With("kind", types.KindApp), DiscoveryGroup: s.DiscoveryGroup, Origin: types.OriginDiscoveryKubernetes, }) diff --git a/lib/srv/discovery/kube_watcher.go b/lib/srv/discovery/kube_watcher.go index d6cc07aa75ae2..20847f50f069e 100644 --- a/lib/srv/discovery/kube_watcher.go +++ b/lib/srv/discovery/kube_watcher.go @@ -20,7 +20,6 @@ package discovery import ( "context" - "log/slog" "sync" "github.com/gravitational/trace" @@ -61,8 +60,7 @@ func (s *Server) startKubeWatchers() error { defer mu.Unlock() return utils.FromSlice(kubeResources, types.KubeCluster.GetName) }, - // TODO(tross): update to user the server logger once it is converted to use slog - Logger: slog.With("kind", types.KindKubernetesCluster), + Logger: s.Log.With("kind", types.KindKubernetesCluster), OnCreate: s.onKubeCreate, OnUpdate: s.onKubeUpdate, OnDelete: s.onKubeDelete, @@ -78,7 +76,7 @@ func (s *Server) startKubeWatchers() error { s.submitFetchersEvent(kubeNonIntegrationFetchers) return kubeNonIntegrationFetchers }, - Log: s.LegacyLogger.WithField("kind", types.KindKubernetesCluster), + Logger: s.Log.With("kind", types.KindKubernetesCluster), DiscoveryGroup: s.DiscoveryGroup, Interval: s.PollInterval, Origin: types.OriginCloud,