From ca753cf1b86e195d0c603567ca120ff95ac4438a Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:10:30 -0500 Subject: [PATCH 01/20] BED-4153: Begin slog rewrite --- cmd/api/src/api/middleware/logging.go | 36 ++++++++++++++----------- cmd/api/src/cmd/bhapi/main.go | 5 ++++ packages/go/log/handlers/handlers.go | 38 +++++++++++++++++++++++++++ packages/go/log/log.go | 27 +++++++++++++++++++ 4 files changed, 90 insertions(+), 16 deletions(-) create mode 100644 packages/go/log/handlers/handlers.go diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index f16f7db84d..be4d1b60d2 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -17,7 +17,9 @@ package middleware import ( + "fmt" "io" + "log/slog" "net/http" "runtime/debug" "time" @@ -95,17 +97,17 @@ func getSignedRequestDate(request *http.Request) (string, bool) { return requestDateHeader, requestDateHeader != "" } -func setSignedRequestFields(request *http.Request, logEvent log.Event) { +func setSignedRequestFields(request *http.Request, logAttrs []slog.Attr) { // Log the token ID and request date if the request contains either header if requestDateHeader, hasHeader := getSignedRequestDate(request); hasHeader { - logEvent.Str("signed_request_date", requestDateHeader) + logAttrs = append(logAttrs, slog.String("signed_request_date", requestDateHeader)) } if authScheme, schemeParameter, err := parseAuthorizationHeader(request); err == nil { switch authScheme { case api.AuthorizationSchemeBHESignature: if _, err := uuid.FromString(schemeParameter); err == nil { - logEvent.Str("token_id", schemeParameter) + logAttrs = append(logAttrs, slog.String("token_id", schemeParameter)) } } } @@ -117,7 +119,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http return func(next http.Handler) http.Handler { return http.HandlerFunc(func(response http.ResponseWriter, 
request *http.Request) { var ( - logEvent = log.WithLevel(log.LevelInfo) + logAttrs = []slog.Attr{} requestContext = ctx.FromRequest(request) deadline time.Time @@ -144,7 +146,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http // Defer the log statement and then serve the request defer func() { - logEvent.Msgf("%s %s", request.Method, request.URL.RequestURI()) + slog.LogAttrs(nil, slog.LevelInfo, fmt.Sprintf("%s %s", request.Method, request.URL.RequestURI()), logAttrs...) if !deadline.IsZero() && time.Now().After(deadline) { log.Warnf( @@ -159,23 +161,25 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http // Perform auth introspection to log the client/user identity for each call if requestContext.AuthCtx.Authenticated() { if identity, err := idResolver.GetIdentity(requestContext.AuthCtx); err == nil { - logEvent.Str(identity.Key, identity.ID.String()) + logAttrs = append(logAttrs, slog.String(identity.Key, identity.ID.String())) } } // Log the token ID and request date if the request contains either header - setSignedRequestFields(request, logEvent) + setSignedRequestFields(request, logAttrs) // Add the fields that we care about before exiting - logEvent.Str("remote_addr", request.RemoteAddr) - logEvent.Str("proto", request.Proto) - logEvent.Str("referer", request.Referer()) - logEvent.Str("user_agent", request.UserAgent()) - logEvent.Str("request_id", ctx.RequestID(request)) - logEvent.Int64("request_bytes", loggedRequestBody.bytesRead) - logEvent.Int64("response_bytes", loggedResponse.bytesWritten) - logEvent.Int("status", loggedResponse.statusCode) - logEvent.Duration("elapsed", time.Since(requestContext.StartTime.UTC())) + logAttrs = append(logAttrs, + slog.String("remote_addr", request.RemoteAddr), + slog.String("proto", request.Proto), + slog.String("referer", request.Referer()), + slog.String("user_agent", request.UserAgent()), + slog.String("request_id", ctx.RequestID(request)), + 
slog.Int64("request_bytes", loggedRequestBody.bytesRead), + slog.Int64("response_bytes", loggedResponse.bytesWritten), + slog.Int("status", loggedResponse.statusCode), + slog.Duration("elapsed", time.Since(requestContext.StartTime.UTC())), + ) }) } } diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index cd67a8e191..c3b75da053 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -20,10 +20,12 @@ import ( "context" "flag" "fmt" + "log/slog" "os" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/handlers" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" @@ -57,6 +59,9 @@ func main() { printVersion() } + logger := slog.New(&handlers.ContextHandler{Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr})}) + slog.SetDefault(logger) + // Initialize basic logging facilities while we start up log.ConfigureDefaults() diff --git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go new file mode 100644 index 0000000000..1db68b4f28 --- /dev/null +++ b/packages/go/log/handlers/handlers.go @@ -0,0 +1,38 @@ +package handlers + +import ( + "context" + "log/slog" + + "github.com/specterops/bloodhound/src/ctx" +) + +type ContextHandler struct { + slog.Handler +} + +func (h ContextHandler) Handle(c context.Context, r slog.Record) error { + if bhCtx, ok := c.Value(ctx.ValueKey).(*ctx.Context); ok { + if bhCtx.RequestID != "" { + r.Add(slog.String("request_id", bhCtx.RequestID)) + } + + if bhCtx.RequestIP != "" { + r.Add(slog.String("request_ip", bhCtx.RequestIP)) + } + + if !bhCtx.AuthCtx.Session.UserID.IsNil() { + r.Add("user_id", bhCtx.AuthCtx.Session.UserID) + } + } + + return h.Handler.Handle(c, r) +} + +func ReplaceAttr(_ []string, a slog.Attr) slog.Attr { + if a.Key == slog.MessageKey { + 
a.Key = "message" + } + + return a +} diff --git a/packages/go/log/log.go b/packages/go/log/log.go index fb334d5de0..7a548bce8a 100644 --- a/packages/go/log/log.go +++ b/packages/go/log/log.go @@ -18,6 +18,7 @@ package log import ( "fmt" + "log/slog" "os" "strings" "sync/atomic" @@ -251,3 +252,29 @@ func LogAndMeasure(level Level, format string, args ...any) func() { } } } + +func SlogMeasure(level slog.Level, format string, args ...any) func() { + then := time.Now() + + return func() { + if elapsed := time.Since(then); elapsed >= measureThreshold { + slog.Log(nil, level, fmt.Sprintf(format, args...), FieldElapsed, elapsed) + } + } +} + +func SlogLogAndMeasure(level slog.Level, format string, args ...any) func() { + var ( + pairID = logMeasurePairCounter.Add(1) + message = fmt.Sprintf(format, args...) + then = time.Now() + ) + + slog.Log(nil, level, message, FieldMeasurementID, pairID) + + return func() { + if elapsed := time.Since(then); elapsed >= measureThreshold { + slog.Log(nil, level, message, FieldMeasurementID, pairID, FieldElapsed, elapsed) + } + } +} From 65367c8811b2e614d7db8137123e7e1f66f99cc8 Mon Sep 17 00:00:00 2001 From: Alyx Holms Date: Wed, 8 Jan 2025 13:15:48 -0700 Subject: [PATCH 02/20] chore: convert all formatted string logs to use `fmt.Sprintf` as an intermediate for slog compatibility --- cmd/api/src/analysis/ad/queries.go | 2 +- cmd/api/src/analysis/azure/queries.go | 2 +- cmd/api/src/api/auth.go | 20 +-- cmd/api/src/api/bloodhoundgraph/properties.go | 3 +- cmd/api/src/api/error.go | 4 +- cmd/api/src/api/marshalling.go | 16 +-- cmd/api/src/api/middleware/compression.go | 4 +- cmd/api/src/api/middleware/logging.go | 4 +- cmd/api/src/api/middleware/middleware.go | 6 +- cmd/api/src/api/static/static.go | 3 +- cmd/api/src/api/tools/dbswitch.go | 3 +- cmd/api/src/api/tools/pg.go | 22 ++-- cmd/api/src/api/v2/agi.go | 16 +-- cmd/api/src/api/v2/analysisrequest.go | 3 +- cmd/api/src/api/v2/apiclient/apiclient.go | 4 +- 
cmd/api/src/api/v2/apitest/test.go | 3 +- cmd/api/src/api/v2/auth/auth.go | 12 +- cmd/api/src/api/v2/auth/login.go | 2 +- cmd/api/src/api/v2/auth/oidc.go | 14 +-- cmd/api/src/api/v2/auth/saml.go | 30 ++--- cmd/api/src/api/v2/auth/sso.go | 4 +- cmd/api/src/api/v2/collectors.go | 10 +- cmd/api/src/api/v2/cypherquery.go | 3 +- cmd/api/src/api/v2/database_wipe.go | 12 +- cmd/api/src/api/v2/flag.go | 2 +- cmd/api/src/api/v2/integration/api.go | 3 +- cmd/api/src/auth/model.go | 4 +- cmd/api/src/bootstrap/initializer.go | 4 +- cmd/api/src/bootstrap/server.go | 10 +- cmd/api/src/bootstrap/util.go | 6 +- cmd/api/src/cmd/bhapi/main.go | 4 +- cmd/api/src/config/config.go | 12 +- cmd/api/src/daemons/api/bhapi/api.go | 5 +- cmd/api/src/daemons/api/toolapi/api.go | 5 +- cmd/api/src/daemons/daemon.go | 7 +- cmd/api/src/daemons/datapipe/agi.go | 11 +- cmd/api/src/daemons/datapipe/analysis.go | 2 +- .../src/daemons/datapipe/azure_convertors.go | 114 +++++++++--------- cmd/api/src/daemons/datapipe/cleanup.go | 17 +-- cmd/api/src/daemons/datapipe/datapipe.go | 33 ++--- cmd/api/src/daemons/datapipe/decoders.go | 9 +- cmd/api/src/daemons/datapipe/ingest.go | 14 +-- cmd/api/src/daemons/datapipe/jobs.go | 46 +++---- cmd/api/src/database/analysisrequest.go | 9 +- cmd/api/src/database/db.go | 6 +- cmd/api/src/database/log.go | 5 +- cmd/api/src/database/migration/stepwise.go | 8 +- cmd/api/src/migrations/graph.go | 14 +-- cmd/api/src/migrations/manifest.go | 12 +- cmd/api/src/model/appcfg/parameter.go | 20 +-- cmd/api/src/model/audit.go | 2 +- cmd/api/src/model/samlprovider.go | 5 +- cmd/api/src/queries/graph.go | 22 ++-- cmd/api/src/services/agi/agi.go | 3 +- .../src/services/dataquality/dataquality.go | 2 +- cmd/api/src/services/entrypoint.go | 4 +- .../src/services/fileupload/file_upload.go | 14 +-- cmd/api/src/services/fileupload/validation.go | 3 +- cmd/api/src/test/lab/fixtures/api.go | 2 +- .../utils/validation/duration_validator.go | 4 +- packages/go/analysis/ad/ad.go | 14 +-- 
packages/go/analysis/ad/adcs.go | 44 +++---- packages/go/analysis/ad/adcscache.go | 33 ++--- packages/go/analysis/ad/esc1.go | 13 +- packages/go/analysis/ad/esc10.go | 15 +-- packages/go/analysis/ad/esc13.go | 15 +-- packages/go/analysis/ad/esc3.go | 58 ++++----- packages/go/analysis/ad/esc4.go | 19 +-- packages/go/analysis/ad/esc6.go | 11 +- packages/go/analysis/ad/esc9.go | 15 +-- packages/go/analysis/ad/esc_shared.go | 4 +- packages/go/analysis/ad/membership.go | 2 +- packages/go/analysis/ad/post.go | 10 +- packages/go/analysis/ad/queries.go | 5 +- packages/go/analysis/azure/application.go | 5 +- packages/go/analysis/azure/filters.go | 4 +- packages/go/analysis/azure/post.go | 22 ++-- packages/go/analysis/azure/queries.go | 3 +- .../go/analysis/azure/service_principal.go | 5 +- packages/go/analysis/hybrid/hybrid.go | 2 +- packages/go/analysis/impact/aggregator.go | 4 +- packages/go/analysis/impact/id_aggregator.go | 3 +- packages/go/analysis/post.go | 9 +- packages/go/analysis/post_operation.go | 9 +- packages/go/cache/cache_benchmark_test.go | 2 +- packages/go/conftool/main.go | 13 +- .../models/pgsql/translate/expression.go | 4 +- packages/go/dawgs/drivers/neo4j/cypher.go | 5 +- packages/go/dawgs/drivers/neo4j/index.go | 4 +- .../go/dawgs/drivers/neo4j/transaction.go | 2 +- packages/go/dawgs/drivers/pg/batch.go | 2 +- packages/go/dawgs/drivers/pg/pg.go | 2 +- packages/go/dawgs/drivers/pg/tooling.go | 3 +- packages/go/dawgs/traversal/traversal.go | 18 +-- packages/go/ein/ad.go | 15 +-- packages/go/ein/azure.go | 20 +-- packages/go/log/cmd/logtest/main.go | 12 +- packages/go/schemagen/generator/cue.go | 2 +- packages/go/schemagen/main.go | 15 +-- packages/go/stbernard/analyzers/js/js.go | 4 +- packages/go/stbernard/cmdrunner/cmdrunner.go | 4 +- .../go/stbernard/command/builder/builder.go | 2 +- .../go/stbernard/command/tester/tester.go | 4 +- .../go/stbernard/environment/environment.go | 3 +- packages/go/stbernard/git/git.go | 8 +- packages/go/stbernard/main.go 
| 11 +- .../go/stbernard/workspace/golang/build.go | 6 +- packages/go/stbernard/workspace/yarn/yarn.go | 2 +- 108 files changed, 589 insertions(+), 543 deletions(-) diff --git a/cmd/api/src/analysis/ad/queries.go b/cmd/api/src/analysis/ad/queries.go index 92e6965184..3686e567e1 100644 --- a/cmd/api/src/analysis/ad/queries.go +++ b/cmd/api/src/analysis/ad/queries.go @@ -87,7 +87,7 @@ func GraphStats(ctx context.Context, db graph.Database) (model.ADDataQualityStat } else { for _, domain := range domains { if domainSID, err := domain.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Domain node %d does not have a valid %s property: %v", domain.ID, common.ObjectID, err) + log.Errorf(fmt.Sprintf("Domain node %d does not have a valid %s property: %v", domain.ID, common.ObjectID, err)) } else { aggregation.Domains++ diff --git a/cmd/api/src/analysis/azure/queries.go b/cmd/api/src/analysis/azure/queries.go index d22c08e160..309a574409 100644 --- a/cmd/api/src/analysis/azure/queries.go +++ b/cmd/api/src/analysis/azure/queries.go @@ -55,7 +55,7 @@ func GraphStats(ctx context.Context, db graph.Database) (model.AzureDataQualityS } else { for _, tenant := range tenants { if tenantObjectID, err := tenant.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err) + log.Errorf(fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) } else { aggregation.Tenants++ diff --git a/cmd/api/src/api/auth.go b/cmd/api/src/api/auth.go index 559ccba049..9c57508987 100644 --- a/cmd/api/src/api/auth.go +++ b/cmd/api/src/api/auth.go @@ -112,7 +112,7 @@ func (s authenticator) auditLogin(requestContext context.Context, commitID uuid. 
err := s.db.CreateAuditLog(requestContext, auditLog) if err != nil { - log.Warnf("failed to write login audit log %+v", err) + log.Warnf(fmt.Sprintf("failed to write login audit log %+v", err)) } } @@ -140,7 +140,7 @@ func (s authenticator) LoginWithSecret(ctx context.Context, loginRequest LoginRe auditLogFields := types.JSONUntypedObject{"username": loginRequest.Username, "auth_type": auth.ProviderTypeSecret} if commitID, err := uuid.NewV4(); err != nil { - log.Errorf("Error generating commit ID for login: %s", err) + log.Errorf(fmt.Sprintf("Error generating commit ID for login: %s", err)) return LoginDetails{}, err } else { s.auditLogin(ctx, commitID, model.AuditLogStatusIntent, model.User{}, auditLogFields) @@ -281,7 +281,7 @@ func (s authenticator) ValidateRequestSignature(tokenID uuid.UUID, request *http authToken.LastAccess = time.Now().UTC() if err := s.db.UpdateAuthToken(request.Context(), authToken); err != nil { - log.Errorf("Error updating last access on AuthToken: %v", err) + log.Errorf(fmt.Sprintf("Error updating last access on AuthToken: %v", err)) } if sdtf, ok := readCloser.(*SelfDestructingTempFile); ok { @@ -362,7 +362,7 @@ func (s authenticator) CreateSSOSession(request *http.Request, response http.Res // Generate commit ID for audit logging if commitID, err = uuid.NewV4(); err != nil { - log.Errorf("Error generating commit ID for login: %s", err) + log.Errorf(fmt.Sprintf("Error generating commit ID for login: %s", err)) WriteErrorResponse(requestCtx, BuildErrorResponse(http.StatusInternalServerError, "audit log creation failure", request), response) return } @@ -417,7 +417,7 @@ func (s authenticator) CreateSession(ctx context.Context, user model.User, authP return "", ErrUserDisabled } - log.Infof("Creating session for user: %s(%s)", user.ID, user.PrincipalName) + log.Infof(fmt.Sprintf("Creating session for user: %s(%s)", user.ID, user.PrincipalName)) userSession := model.UserSession{ User: user, @@ -475,16 +475,16 @@ func (s authenticator) 
ValidateSession(ctx context.Context, jwtTokenString strin return auth.Context{}, err } else if !token.Valid { - log.Infof("Token invalid") + log.Infof(fmt.Sprintf("Token invalid")) return auth.Context{}, ErrInvalidAuth } else if sessionID, err := claims.SessionID(); err != nil { - log.Infof("Session ID %s invalid: %v", claims.Id, err) + log.Infof(fmt.Sprintf("Session ID %s invalid: %v", claims.Id, err)) return auth.Context{}, ErrInvalidAuth } else if session, err := s.db.GetUserSession(ctx, sessionID); err != nil { - log.Infof("Unable to find session %d", sessionID) + log.Infof(fmt.Sprintf("Unable to find session %d", sessionID)) return auth.Context{}, ErrInvalidAuth } else if session.Expired() { - log.Infof("Session %d is expired", sessionID) + log.Infof(fmt.Sprintf("Session %d is expired", sessionID)) return auth.Context{}, ErrInvalidAuth } else { authContext := auth.Context{ @@ -493,7 +493,7 @@ func (s authenticator) ValidateSession(ctx context.Context, jwtTokenString strin } if session.AuthProviderType == model.SessionAuthProviderSecret && session.User.AuthSecret == nil { - log.Infof("No auth secret found for user ID %s", session.UserID.String()) + log.Infof(fmt.Sprintf("No auth secret found for user ID %s", session.UserID.String())) return auth.Context{}, ErrNoUserSecret } else if session.AuthProviderType == model.SessionAuthProviderSecret && session.User.AuthSecret.Expired() { var ( diff --git a/cmd/api/src/api/bloodhoundgraph/properties.go b/cmd/api/src/api/bloodhoundgraph/properties.go index d71f620390..568ee5d2a8 100644 --- a/cmd/api/src/api/bloodhoundgraph/properties.go +++ b/cmd/api/src/api/bloodhoundgraph/properties.go @@ -17,6 +17,7 @@ package bloodhoundgraph import ( + "fmt" "strings" "github.com/specterops/bloodhound/analysis" @@ -32,7 +33,7 @@ import ( func getNodeLevel(target *graph.Node) (int, bool) { if startSystemTags, err := target.Properties.Get(common.SystemTags.String()).String(); err == nil { - log.Debugf("Unable to find a %s property for 
node %d with kinds %v", common.SystemTags.String(), target.ID, target.Kinds) + log.Debugf(fmt.Sprintf("Unable to find a %s property for node %d with kinds %v", common.SystemTags.String(), target.ID, target.Kinds)) } else if strings.Contains(startSystemTags, ad.AdminTierZero) { return 0, true } diff --git a/cmd/api/src/api/error.go b/cmd/api/src/api/error.go index 59e4838ca5..cb61ebe438 100644 --- a/cmd/api/src/api/error.go +++ b/cmd/api/src/api/error.go @@ -129,7 +129,7 @@ func HandleDatabaseError(request *http.Request, response http.ResponseWriter, er } else if errors.Is(err, context.DeadlineExceeded) { WriteErrorResponse(request.Context(), BuildErrorResponse(http.StatusInternalServerError, ErrorResponseRequestTimeout, request), response) } else { - log.Errorf("Unexpected database error: %v", err) + log.Errorf(fmt.Sprintf("Unexpected database error: %v", err)) WriteErrorResponse(request.Context(), BuildErrorResponse(http.StatusInternalServerError, ErrorResponseDetailsInternalServerError, request), response) } } @@ -140,7 +140,7 @@ func FormatDatabaseError(err error) error { if errors.Is(err, database.ErrNotFound) { return errors.New(ErrorResponseDetailsResourceNotFound) } else { - log.Errorf("Unexpected database error: %v", err) + log.Errorf(fmt.Sprintf("Unexpected database error: %v", err)) return errors.New(ErrorResponseDetailsInternalServerError) } } diff --git a/cmd/api/src/api/marshalling.go b/cmd/api/src/api/marshalling.go index 1958d889d3..dd5cb40471 100644 --- a/cmd/api/src/api/marshalling.go +++ b/cmd/api/src/api/marshalling.go @@ -76,15 +76,15 @@ type ResponseWrapper struct { func WriteErrorResponse(ctx context.Context, untypedError any, response http.ResponseWriter) { switch typedError := untypedError.(type) { case *ErrorResponse: // V1 error handling - log.Warnf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Error) + log.Warnf(fmt.Sprintf("Writing API Error. Status: %v. 
Message: %v", typedError.HTTPStatus, typedError.Error)) WriteJSONResponse(context.Background(), typedError.Error, typedError.HTTPStatus, response) case *ErrorWrapper: // V2 error handling - log.Warnf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Errors) + log.Warnf(fmt.Sprintf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Errors)) WriteJSONResponse(ctx, typedError, typedError.HTTPStatus, response) default: - log.Warnf("Failure Writing API Error. Status: %v. Message: %v", http.StatusInternalServerError, "Invalid error format returned") + log.Warnf(fmt.Sprintf("Failure Writing API Error. Status: %v. Message: %v", http.StatusInternalServerError, "Invalid error format returned")) WriteJSONResponse(ctx, "An internal error has occurred that is preventing the service from servicing this request.", http.StatusInternalServerError, response) } } @@ -94,7 +94,7 @@ func WriteErrorResponse(ctx context.Context, untypedError any, response http.Res func WriteBasicResponse(ctx context.Context, inputData any, statusCode int, response http.ResponseWriter) { if data, err := ToJSONRawMessage(inputData); err != nil { - log.Errorf("Failed marshaling data for basic response: %v", err) + log.Errorf(fmt.Sprintf("Failed marshaling data for basic response: %v", err)) response.WriteHeader(http.StatusInternalServerError) } else { WriteJSONResponse(ctx, BasicResponse{ @@ -161,12 +161,12 @@ func WriteResponseWrapperWithTimeWindowAndPagination(ctx context.Context, data a func WriteJSONResponse(_ context.Context, message any, statusCode int, response http.ResponseWriter) { response.Header().Set(headers.ContentType.String(), mediatypes.ApplicationJson.String()) if content, err := json.Marshal(message); err != nil { - log.Errorf("Failed to marshal value into JSON for request: %v: for message: %+v", err, message) + log.Errorf(fmt.Sprintf("Failed to marshal value into JSON for request: %v: for message: %+v", err, message)) 
response.WriteHeader(http.StatusInternalServerError) } else { response.WriteHeader(statusCode) if written, err := response.Write(content); err != nil { - log.Errorf("Writing API Error. Failed to write JSON response with %d bytes written and error: %v", written, err) + log.Errorf(fmt.Sprintf("Writing API Error. Failed to write JSON response with %d bytes written and error: %v", written, err)) } } } @@ -176,7 +176,7 @@ func WriteCSVResponse(_ context.Context, message model.CSVWriter, statusCode int response.WriteHeader(statusCode) if err := message.WriteCSV(response); err != nil { - log.Errorf("Writing API Error. Failed to write CSV for request: %v", err) + log.Errorf(fmt.Sprintf("Writing API Error. Failed to write CSV for request: %v", err)) } } @@ -186,7 +186,7 @@ func WriteBinaryResponse(_ context.Context, data []byte, filename string, status response.WriteHeader(statusCode) if written, err := response.Write(data); err != nil { - log.Errorf("Writing API Error. Failed to write binary response with %d bytes written and error: %v", written, err) + log.Errorf(fmt.Sprintf("Writing API Error. 
Failed to write binary response with %d bytes written and error: %v", written, err)) } } diff --git a/cmd/api/src/api/middleware/compression.go b/cmd/api/src/api/middleware/compression.go index d6dd99111a..7ee341ad7b 100644 --- a/cmd/api/src/api/middleware/compression.go +++ b/cmd/api/src/api/middleware/compression.go @@ -65,7 +65,7 @@ func CompressionMiddleware(next http.Handler) http.Handler { request.Body, err = wrapBody(encoding, request.Body) if err != nil { errMsg := fmt.Sprintf("failed to create reader for %s encoding: %v", encoding, err) - log.Warnf(errMsg) + log.Warnf(fmt.Sprintf(errMsg)) if errors.Is(err, errUnsupportedEncoding) { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusUnsupportedMediaType, fmt.Sprintf("Error trying to read request: %s", errMsg), request), responseWriter) } else { @@ -106,7 +106,7 @@ func wrapBody(encoding string, body io.ReadCloser) (io.ReadCloser, error) { case "deflate": newBody, err = zlib.NewReader(body) default: - log.Infof("Unsupported encoding detected: %s", encoding) + log.Infof(fmt.Sprintf("Unsupported encoding detected: %s", encoding)) err = errUnsupportedEncoding } return newBody, err diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index be4d1b60d2..00c80a5553 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -38,7 +38,7 @@ func PanicHandler(next http.Handler) http.Handler { return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) { defer func() { if recovery := recover(); recovery != nil { - log.Errorf("[panic recovery] %s - [stack trace] %s", recovery, debug.Stack()) + log.Errorf(fmt.Sprintf("[panic recovery] %s - [stack trace] %s", recovery, debug.Stack())) } }() @@ -136,7 +136,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http // assign a deadline, but only if a valid timeout has been supplied via the prefer header timeout, err := 
RequestWaitDuration(request) if err != nil { - log.Errorf("Error parsing prefer header for timeout: %w", err) + log.Errorf(fmt.Sprintf("Error parsing prefer header for timeout: %w", err)) } else if err == nil && timeout > 0 { deadline = time.Now().Add(timeout * time.Second) } diff --git a/cmd/api/src/api/middleware/middleware.go b/cmd/api/src/api/middleware/middleware.go index 4b3a7cde2d..acf0ca63a3 100644 --- a/cmd/api/src/api/middleware/middleware.go +++ b/cmd/api/src/api/middleware/middleware.go @@ -103,7 +103,7 @@ func ContextMiddleware(next http.Handler) http.Handler { ) if newUUID, err := uuid.NewV4(); err != nil { - log.Errorf("Failed generating a new request UUID: %v", err) + log.Errorf(fmt.Sprintf("Failed generating a new request UUID: %v", err)) requestID = "ERROR" } else { requestID = newUUID.String() @@ -155,14 +155,14 @@ func parseUserIP(r *http.Request) string { // The point of this code is to strip the port, so we don't need to save it. if host, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - log.Warnf("Error parsing remoteAddress '%s': %s", r.RemoteAddr, err) + log.Warnf(fmt.Sprintf("Error parsing remoteAddress '%s': %s", r.RemoteAddr, err)) remoteIp = r.RemoteAddr } else { remoteIp = host } if result := r.Header.Get("X-Forwarded-For"); result == "" { - log.Debugf("No data found in X-Forwarded-For header") + log.Debugf(fmt.Sprintf("No data found in X-Forwarded-For header")) return remoteIp } else { result += "," + remoteIp diff --git a/cmd/api/src/api/static/static.go b/cmd/api/src/api/static/static.go index fcae2512a6..17fd58a01b 100644 --- a/cmd/api/src/api/static/static.go +++ b/cmd/api/src/api/static/static.go @@ -17,6 +17,7 @@ package static import ( + "fmt" "io" "io/fs" "mime" @@ -89,7 +90,7 @@ func serve(cfg AssetConfig, response http.ResponseWriter, request *http.Request) response.Header().Set(headers.StrictTransportSecurity.String(), utils.HSTSSetting) if _, err := io.Copy(response, fin); err != nil { - log.Errorf("Failed flushing 
static file content for asset %s to client: %v", assetPath, err) + log.Errorf(fmt.Sprintf("Failed flushing static file content for asset %s to client: %v", assetPath, err)) } } } diff --git a/cmd/api/src/api/tools/dbswitch.go b/cmd/api/src/api/tools/dbswitch.go index e5b7b8d208..1d71a5c0ca 100644 --- a/cmd/api/src/api/tools/dbswitch.go +++ b/cmd/api/src/api/tools/dbswitch.go @@ -19,6 +19,7 @@ package tools import ( "context" "errors" + "fmt" "github.com/jackc/pgx/v5" "github.com/specterops/bloodhound/log" @@ -83,7 +84,7 @@ func LookupGraphDriver(ctx context.Context, cfg config.Configuration) (string, e if setDriverName, err := GetGraphDriver(ctx, pgxConn); err != nil { if errors.Is(err, pgx.ErrNoRows) { - log.Infof("No database driver has been set for migration, using: %s", driverName) + log.Infof(fmt.Sprintf("No database driver has been set for migration, using: %s", driverName)) } else { return "", err } diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index fbd597d9af..0b79b18a76 100644 --- a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -235,7 +235,7 @@ func (s *PGMigrator) SwitchPostgreSQL(response http.ResponseWriter, request *htt "error": fmt.Errorf("failed connecting to PostgreSQL: %w", err), }, http.StatusInternalServerError, response) } else if err := pgDB.AssertSchema(request.Context(), s.graphSchema); err != nil { - log.Errorf("Unable to assert graph schema in PostgreSQL: %v", err) + log.Errorf(fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) } else if err := SetGraphDriver(request.Context(), s.cfg, pg.DriverName); err != nil { api.WriteJSONResponse(request.Context(), map[string]any{ "error": fmt.Errorf("failed updating graph database driver preferences: %w", err), @@ -244,7 +244,7 @@ func (s *PGMigrator) SwitchPostgreSQL(response http.ResponseWriter, request *htt s.graphDBSwitch.Switch(pgDB) response.WriteHeader(http.StatusOK) - log.Infof("Updated default graph driver to PostgreSQL") + 
log.Infof(fmt.Sprintf("Updated default graph driver to PostgreSQL")) } } @@ -264,7 +264,7 @@ func (s *PGMigrator) SwitchNeo4j(response http.ResponseWriter, request *http.Req s.graphDBSwitch.Switch(neo4jDB) response.WriteHeader(http.StatusOK) - log.Infof("Updated default graph driver to Neo4j") + log.Infof(fmt.Sprintf("Updated default graph driver to Neo4j")) } } @@ -282,7 +282,7 @@ func (s *PGMigrator) startMigration() error { }); err != nil { return fmt.Errorf("failed connecting to PostgreSQL: %w", err) } else { - log.Infof("Dispatching live migration from Neo4j to PostgreSQL") + log.Infof(fmt.Sprintf("Dispatching live migration from Neo4j to PostgreSQL")) migrationCtx, migrationCancelFunc := context.WithCancel(s.serverCtx) s.migrationCancelFunc = migrationCancelFunc @@ -290,22 +290,22 @@ func (s *PGMigrator) startMigration() error { go func(ctx context.Context) { defer migrationCancelFunc() - log.Infof("Starting live migration from Neo4j to PostgreSQL") + log.Infof(fmt.Sprintf("Starting live migration from Neo4j to PostgreSQL")) if err := pgDB.AssertSchema(ctx, s.graphSchema); err != nil { - log.Errorf("Unable to assert graph schema in PostgreSQL: %v", err) + log.Errorf(fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) } else if err := migrateTypes(ctx, neo4jDB, pgDB); err != nil { - log.Errorf("Unable to migrate Neo4j kinds to PostgreSQL: %v", err) + log.Errorf(fmt.Sprintf("Unable to migrate Neo4j kinds to PostgreSQL: %v", err)) } else if nodeIDMappings, err := migrateNodes(ctx, neo4jDB, pgDB); err != nil { - log.Errorf("Failed importing nodes into PostgreSQL: %v", err) + log.Errorf(fmt.Sprintf("Failed importing nodes into PostgreSQL: %v", err)) } else if err := migrateEdges(ctx, neo4jDB, pgDB, nodeIDMappings); err != nil { - log.Errorf("Failed importing edges into PostgreSQL: %v", err) + log.Errorf(fmt.Sprintf("Failed importing edges into PostgreSQL: %v", err)) } else { - log.Infof("Migration to PostgreSQL completed successfully") + 
log.Infof(fmt.Sprintf("Migration to PostgreSQL completed successfully")) } if err := s.advanceState(stateIdle, stateMigrating, stateCanceling); err != nil { - log.Errorf("Database migration state management error: %v", err) + log.Errorf(fmt.Sprintf("Database migration state management error: %v", err)) } }(migrationCtx) } diff --git a/cmd/api/src/api/v2/agi.go b/cmd/api/src/api/v2/agi.go index 1003c62756..d3ca7fd9b2 100644 --- a/cmd/api/src/api/v2/agi.go +++ b/cmd/api/src/api/v2/agi.go @@ -271,14 +271,14 @@ func (s Resources) UpdateAssetGroupSelectors(response http.ResponseWriter, reque api.HandleDatabaseError(request, response, err) } else { if err := s.GraphQuery.UpdateSelectorTags(request.Context(), s.DB, result); err != nil { - log.Warnf("Failed updating asset group tags; will be retried upon next analysis run: %v", err) + log.Warnf(fmt.Sprintf("Failed updating asset group tags; will be retried upon next analysis run: %v", err)) } if assetGroup.Tag == model.TierZeroAssetGroupTag { // When T0 asset group selectors are modified, entire analysis must be re-run var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf("encountered request analysis for unknown user, this shouldn't happen") + log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-update-asset-group-selectors" } else { userId = user.ID.String() @@ -484,7 +484,7 @@ func parseAGMembersFromNodes(nodes graph.NodeSet, selectors model.AssetGroupSele // a member is custom if at least one selector exists for that object ID for _, agSelector := range selectors { if objectId, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Warnf("Objectid is missing for node %d", node.ID) + log.Warnf(fmt.Sprintf("Objectid is missing for node %d", node.ID)) } else if agSelector.Selector == objectId { isCustomMember = true } @@ -496,14 +496,14 @@ func 
parseAGMembersFromNodes(nodes graph.NodeSet, selectors model.AssetGroupSele ) if objectId, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Warnf("Objectid is missing for node %d", node.ID) + log.Warnf(fmt.Sprintf("Objectid is missing for node %d", node.ID)) memberObjectId = "" } else { memberObjectId = objectId } if name, err := node.Properties.Get(common.Name.String()).String(); err != nil { - log.Warnf("Name is missing for node %d", node.ID) + log.Warnf(fmt.Sprintf("Name is missing for node %d", node.ID)) memberName = "" } else { memberName = name @@ -520,20 +520,20 @@ func parseAGMembersFromNodes(nodes graph.NodeSet, selectors model.AssetGroupSele if node.Kinds.ContainsOneOf(azure.Entity) { if tenantID, err := node.Properties.Get(azure.TenantID.String()).String(); err != nil { - log.Warnf("%s is missing for node %d", azure.TenantID.String(), node.ID) + log.Warnf(fmt.Sprintf("%s is missing for node %d", azure.TenantID.String(), node.ID)) } else { agMember.EnvironmentKind = azure.Tenant.String() agMember.EnvironmentID = tenantID } } else if node.Kinds.ContainsOneOf(ad.Entity) { if domainSID, err := node.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("%s is missing for node %d", ad.DomainSID.String(), node.ID) + log.Warnf(fmt.Sprintf("%s is missing for node %d", ad.DomainSID.String(), node.ID)) } else { agMember.EnvironmentKind = ad.Domain.String() agMember.EnvironmentID = domainSID } } else { - log.Warnf("Node %d is missing valid base entity, skipping AG Membership...", node.ID) + log.Warnf(fmt.Sprintf("Node %d is missing valid base entity, skipping AG Membership...", node.ID)) continue } diff --git a/cmd/api/src/api/v2/analysisrequest.go b/cmd/api/src/api/v2/analysisrequest.go index efdae9533d..1a6f1b3ddd 100644 --- a/cmd/api/src/api/v2/analysisrequest.go +++ b/cmd/api/src/api/v2/analysisrequest.go @@ -19,6 +19,7 @@ package v2 import ( "database/sql" "errors" + "fmt" "net/http" 
"github.com/specterops/bloodhound/log" @@ -43,7 +44,7 @@ func (s Resources) RequestAnalysis(response http.ResponseWriter, request *http.R var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf("encountered request analysis for unknown user, this shouldn't happen") + log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/apiclient/apiclient.go b/cmd/api/src/api/v2/apiclient/apiclient.go index 55ec67813c..44801c0fe5 100644 --- a/cmd/api/src/api/v2/apiclient/apiclient.go +++ b/cmd/api/src/api/v2/apiclient/apiclient.go @@ -114,7 +114,7 @@ func (s Client) ZipRequest(method, path string, params url.Values, body []byte) return nil, fmt.Errorf("waited %f seconds while retrying - Request failure cause: %w", maxSleep.Seconds(), err) } - log.Infof("Request to %s failed with error: %v. Attempting a retry.", endpoint.String(), err) + log.Infof(fmt.Sprintf("Request to %s failed with error: %v. Attempting a retry.", endpoint.String(), err)) time.Sleep(sleepInterval) } else { return response, nil @@ -170,7 +170,7 @@ func (s Client) Request(method, path string, params url.Values, body any, header return nil, fmt.Errorf("waited %f seconds while retrying - Request failure cause: %w", maxSleep.Seconds(), err) } - log.Infof("Request to %s failed with error: %v. Attempting a retry.", endpoint.String(), err) + log.Infof(fmt.Sprintf("Request to %s failed with error: %v. 
Attempting a retry.", endpoint.String(), err)) time.Sleep(sleepInterval) } else { return response, nil diff --git a/cmd/api/src/api/v2/apitest/test.go b/cmd/api/src/api/v2/apitest/test.go index 7e0ff0a01e..77f815b687 100644 --- a/cmd/api/src/api/v2/apitest/test.go +++ b/cmd/api/src/api/v2/apitest/test.go @@ -17,6 +17,7 @@ package apitest import ( + "fmt" "log" "github.com/specterops/bloodhound/src/api" @@ -30,7 +31,7 @@ import ( func NewAuthManagementResource(mockCtrl *gomock.Controller) (auth.ManagementResource, *mocks.MockDatabase) { cfg, err := config.NewDefaultConfiguration() if err != nil { - log.Fatalf("Failed to create default configuration: %v", err) + log.Fatalf(fmt.Sprintf("Failed to create default configuration: %v", err)) } cfg.Crypto.Argon2.NumIterations = 1 diff --git a/cmd/api/src/api/v2/auth/auth.go b/cmd/api/src/api/v2/auth/auth.go index ea140119ac..5f107b0368 100644 --- a/cmd/api/src/api/v2/auth/auth.go +++ b/cmd/api/src/api/v2/auth/auth.go @@ -317,7 +317,7 @@ func (s ManagementResource) CreateUser(response http.ResponseWriter, request *ht api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, errs.Error(), request), response) return } else if secretDigest, err := s.secretDigester.Digest(createUserRequest.Secret); err != nil { - log.Errorf("Error while attempting to digest secret for user: %v", err) + log.Errorf(fmt.Sprintf("Error while attempting to digest secret for user: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) return } else { @@ -340,7 +340,7 @@ func (s ManagementResource) CreateUser(response http.ResponseWriter, request *ht api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("SAML Provider ID must be a number: %v", err.Error()), request), response) return } else if samlProvider, err := s.db.GetSAMLProvider(request.Context(), 
samlProviderID); err != nil { - log.Errorf("Error while attempting to fetch SAML provider %s: %v", createUserRequest.SAMLProviderID, err) + log.Errorf(fmt.Sprintf("Error while attempting to fetch SAML provider %s: %v", createUserRequest.SAMLProviderID, err)) api.HandleDatabaseError(request, response, err) return } else { @@ -551,7 +551,7 @@ func (s ManagementResource) PutUserAuthSecret(response http.ResponseWriter, requ passwordExpiration := appcfg.GetPasswordExpiration(request.Context(), s.db) if secretDigest, err := s.secretDigester.Digest(setUserSecretRequest.Secret); err != nil { - log.Errorf("Error while attempting to digest secret for user: %v", err) + log.Errorf(fmt.Sprintf("Error while attempting to digest secret for user: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { authSecret.UserID = targetUser.ID @@ -749,11 +749,11 @@ func (s ManagementResource) DeleteAuthToken(response http.ResponseWriter, reques if err := s.db.AppendAuditLog(request.Context(), auditLogEntry); err != nil { // We want to keep err scoped because response trumps this error if errors.Is(err, database.ErrNotFound) { - log.Errorf("resource not found: %v", err) + log.Errorf(fmt.Sprintf("resource not found: %v", err)) } else if errors.Is(err, context.DeadlineExceeded) { - log.Errorf("context deadline exceeded: %v", err) + log.Errorf(fmt.Sprintf("context deadline exceeded: %v", err)) } else { - log.Errorf("unexpected database error: %v", err) + log.Errorf(fmt.Sprintf("unexpected database error: %v", err)) } } } diff --git a/cmd/api/src/api/v2/auth/login.go b/cmd/api/src/api/v2/auth/login.go index 1bbd14ed91..505f6c5620 100644 --- a/cmd/api/src/api/v2/auth/login.go +++ b/cmd/api/src/api/v2/auth/login.go @@ -55,7 +55,7 @@ func (s LoginResource) loginSecret(loginRequest api.LoginRequest, response http. 
} else if errors.Is(err, api.ErrUserDisabled) { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusForbidden, err.Error(), request), response) } else { - log.Errorf("Error during authentication for request ID %s: %v", ctx.RequestID(request), err) + log.Errorf(fmt.Sprintf("Error during authentication for request ID %s: %v", ctx.RequestID(request), err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } } else { diff --git a/cmd/api/src/api/v2/auth/oidc.go b/cmd/api/src/api/v2/auth/oidc.go index 0603191ed1..afa7ebd72e 100644 --- a/cmd/api/src/api/v2/auth/oidc.go +++ b/cmd/api/src/api/v2/auth/oidc.go @@ -34,7 +34,7 @@ import ( "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api" - "github.com/specterops/bloodhound/src/api/v2" + v2 "github.com/specterops/bloodhound/src/api/v2" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/ctx" "github.com/specterops/bloodhound/src/database" @@ -163,11 +163,11 @@ func (s ManagementResource) OIDCLoginHandler(response http.ResponseWriter, reque // SSO misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if state, err := config.GenerateRandomBase64String(77); err != nil { - log.Errorf("[OIDC] Failed to generate state: %v", err) + log.Errorf(fmt.Sprintf("[OIDC] Failed to generate state: %v", err)) // Technical issues scenario v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") } else if provider, err := oidc.NewProvider(request.Context(), ssoProvider.OIDCProvider.Issuer); err != nil { - log.Errorf("[OIDC] Failed to create OIDC provider: %v", err) + log.Errorf(fmt.Sprintf("[OIDC] Failed to create OIDC provider: %v", err)) // SSO misconfiguration or technical issue // Treat this as a misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") @@ -221,20 +221,20 @@ func (s ManagementResource) OIDCCallbackHandler(response http.ResponseWriter, re // Invalid state - treat as technical issue or misconfiguration v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. Please check your internet and try again.") } else if provider, err := oidc.NewProvider(request.Context(), ssoProvider.OIDCProvider.Issuer); err != nil { - log.Errorf("[OIDC] Failed to create OIDC provider: %v", err) + log.Errorf(fmt.Sprintf("[OIDC] Failed to create OIDC provider: %v", err)) // SSO misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if claims, err := getOIDCClaims(request.Context(), provider, ssoProvider, pkceVerifier, code[0]); err != nil { - log.Errorf("[OIDC] %v", err) + log.Errorf(fmt.Sprintf("[OIDC] %v", err)) v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else if email, err := getEmailFromOIDCClaims(claims); errors.Is(err, ErrEmailMissing) { // Note email claims are not always present so we will check different claim keys for possible email - log.Errorf("[OIDC] Claims did not contain any valid email address") + log.Errorf(fmt.Sprintf("[OIDC] Claims did not contain any valid email address")) v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else { if 
ssoProvider.Config.AutoProvision.Enabled { if err := jitOIDCUserCreation(request.Context(), ssoProvider, email, claims, s.db); err != nil { // It is safe to let this request drop into the CreateSSOSession function below to ensure proper audit logging - log.Errorf("[OIDC] Error during JIT User Creation: %v", err) + log.Errorf(fmt.Sprintf("[OIDC] Error during JIT User Creation: %v", err)) } } diff --git a/cmd/api/src/api/v2/auth/saml.go b/cmd/api/src/api/v2/auth/saml.go index 15c7bdf974..f79c45a800 100644 --- a/cmd/api/src/api/v2/auth/saml.go +++ b/cmd/api/src/api/v2/auth/saml.go @@ -35,7 +35,7 @@ import ( "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api" - "github.com/specterops/bloodhound/src/api/v2" + v2 "github.com/specterops/bloodhound/src/api/v2" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" "github.com/specterops/bloodhound/src/database" @@ -345,12 +345,12 @@ func (s ManagementResource) ServeMetadata(response http.ResponseWriter, request } else { // Note: This is the samlsp metadata tied to authenticate flow and will not be the same as the XML metadata used to import the SAML provider initially if content, err := xml.MarshalIndent(serviceProvider.Metadata(), "", " "); err != nil { - log.Errorf("[SAML] XML marshalling failure during service provider encoding for %s: %v", ssoProvider.SAMLProvider.IssuerURI, err) + log.Errorf(fmt.Sprintf("[SAML] XML marshalling failure during service provider encoding for %s: %v", ssoProvider.SAMLProvider.IssuerURI, err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { response.Header().Set(headers.ContentType.String(), mediatypes.ApplicationSamlmetadataXml.String()) if _, err := response.Write(content); err != nil { - log.Errorf("[SAML] Failed to write response for serving metadata: %v", 
err) + log.Errorf(fmt.Sprintf("[SAML] Failed to write response for serving metadata: %v", err)) } } } @@ -370,7 +370,7 @@ func (s ManagementResource) ServeSigningCertificate(response http.ResponseWriter // Note this is the public cert not necessarily the IDP cert response.Header().Set(headers.ContentDisposition.String(), fmt.Sprintf("attachment; filename=\"%s-signing-certificate.pem\"", ssoProvider.Slug)) if _, err := response.Write([]byte(crypto.FormatCert(s.config.SAML.ServiceProviderCertificate))); err != nil { - log.Errorf("[SAML] Failed to write response for serving signing certificate: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Failed to write response for serving signing certificate: %v", err)) } } } @@ -382,7 +382,7 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if serviceProvider, err := auth.NewServiceProvider(*ctx.Get(request.Context()).Host, s.config, *ssoProvider.SAMLProvider); err != nil { - log.Errorf("[SAML] Service provider creation failed: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Service provider creation failed: %v", err)) // Technical issues scenario v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") } else { @@ -397,7 +397,7 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque // TODO: add actual relay state support - BED-5071 if authReq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation, binding, saml.HTTPPostBinding); err != nil { - log.Errorf("[SAML] Failed creating SAML authentication request: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Failed creating SAML authentication request: %v", err)) // SAML misconfiguration or technical issue // Since this likely indicates a configuration problem, we treat it as a misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") @@ -405,7 +405,7 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque switch binding { case saml.HTTPRedirectBinding: if redirectURL, err := authReq.Redirect("", &serviceProvider); err != nil { - log.Errorf("[SAML] Failed to format a redirect for SAML provider %s: %v", serviceProvider.EntityID, err) + log.Errorf(fmt.Sprintf("[SAML] Failed to format a redirect for SAML provider %s: %v", serviceProvider.EntityID, err)) // Likely a technical or configuration issue v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else { @@ -419,13 +419,13 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque response.WriteHeader(http.StatusOK) if _, err := response.Write([]byte(fmt.Sprintf(authInitiationContentBodyFormat, authReq.Post("")))); err != nil { - log.Errorf("[SAML] Failed to write response with HTTP POST binding: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Failed to write response with HTTP POST binding: %v", err)) // Technical issues scenario v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") } default: - log.Errorf("[SAML] Unhandled binding type %s", binding) + log.Errorf(fmt.Sprintf("[SAML] Unhandled binding type %s", binding)) // Treating unknown binding as a misconfiguration v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } @@ -439,10 +439,10 @@ func (s ManagementResource) SAMLCallbackHandler(response http.ResponseWriter, re // SAML misconfiguration v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if serviceProvider, err := auth.NewServiceProvider(*ctx.Get(request.Context()).Host, s.config, *ssoProvider.SAMLProvider); err != nil { - log.Errorf("[SAML] Service provider creation failed: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Service provider creation failed: %v", err)) v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. Please check your internet and try again.") } else if err := request.ParseForm(); err != nil { - log.Errorf("[SAML] Failed to parse form POST: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Failed to parse form POST: %v", err)) // Technical issues or invalid form data // This is not covered by acceptance criteria directly; treat as technical issue v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") @@ -450,21 +450,21 @@ func (s ManagementResource) SAMLCallbackHandler(response http.ResponseWriter, re var typedErr *saml.InvalidResponseError switch { case errors.As(err, &typedErr): - log.Errorf("[SAML] Failed to parse ACS response for provider %s: %v - %s", ssoProvider.SAMLProvider.IssuerURI, typedErr.PrivateErr, typedErr.Response) + log.Errorf(fmt.Sprintf("[SAML] Failed to parse ACS response for provider %s: %v - %s", ssoProvider.SAMLProvider.IssuerURI, typedErr.PrivateErr, typedErr.Response)) default: - log.Errorf("[SAML] Failed to parse ACS response for provider %s: %v", ssoProvider.SAMLProvider.IssuerURI, err) + log.Errorf(fmt.Sprintf("[SAML] Failed to parse ACS response for provider %s: %v", ssoProvider.SAMLProvider.IssuerURI, err)) } // SAML credentials issue scenario (authentication failed) v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else if principalName, err := ssoProvider.SAMLProvider.GetSAMLUserPrincipalNameFromAssertion(assertion); err != nil { - log.Errorf("[SAML] Failed to lookup user for SAML provider %s: %v", ssoProvider.Name, err) + log.Errorf(fmt.Sprintf("[SAML] Failed to lookup user for SAML provider %s: %v", ssoProvider.Name, err)) // SAML credentials issue scenario again v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else { if ssoProvider.Config.AutoProvision.Enabled { if err := jitSAMLUserCreation(request.Context(), ssoProvider, principalName, assertion, s.db); err != nil { // It is safe to let this request drop into the CreateSSOSession function below to ensure proper audit logging - log.Errorf("[SAML] Error during JIT User Creation: %v", err) + log.Errorf(fmt.Sprintf("[SAML] Error during JIT User Creation: %v", err)) } } diff --git a/cmd/api/src/api/v2/auth/sso.go b/cmd/api/src/api/v2/auth/sso.go index 643e03274e..e51dbd6520 100644 
--- a/cmd/api/src/api/v2/auth/sso.go +++ b/cmd/api/src/api/v2/auth/sso.go @@ -278,9 +278,9 @@ func SanitizeAndGetRoles(ctx context.Context, autoProvisionConfig model.SSOProvi case len(validRoles) == 1: return validRoles, nil case len(validRoles) > 1: - log.Warnf("[SSO] JIT Role Provision detected multiple valid roles - %s , falling back to default role %s", validRoles.Names(), defaultRole.Name) + log.Warnf(fmt.Sprintf("[SSO] JIT Role Provision detected multiple valid roles - %s , falling back to default role %s", validRoles.Names(), defaultRole.Name)) default: - log.Warnf("[SSO] JIT Role Provision detected no valid roles from %s , falling back to default role %s", maybeBHRoles, defaultRole.Name) + log.Warnf(fmt.Sprintf("[SSO] JIT Role Provision detected no valid roles from %s , falling back to default role %s", maybeBHRoles, defaultRole.Name)) } } diff --git a/cmd/api/src/api/v2/collectors.go b/cmd/api/src/api/v2/collectors.go index 64d0c09dc2..8850943aae 100644 --- a/cmd/api/src/api/v2/collectors.go +++ b/cmd/api/src/api/v2/collectors.go @@ -64,7 +64,7 @@ func (s *Resources) GetCollectorManifest(response http.ResponseWriter, request * if CollectorType(collectorType).String() == "InvalidCollectorType" { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("Invalid collector type: %s", collectorType), request), response) } else if collectorManifest, ok := s.CollectorManifests[collectorType]; !ok { - log.Errorf("Manifest doesn't exist for %s collector", collectorType) + log.Errorf(fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { api.WriteBasicResponse(request.Context(), collectorManifest, http.StatusOK, response) @@ -84,7 +84,7 @@ func (s *Resources) DownloadCollectorByVersion(response http.ResponseWriter, req 
api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("Invalid collector type: %s", collectorType), request), response) } else if releaseTag == "latest" { if collectorManifest, ok := s.CollectorManifests[collectorType]; !ok { - log.Errorf("Manifest doesn't exist for %s collector", collectorType) + log.Errorf(fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) return } else { @@ -95,7 +95,7 @@ func (s *Resources) DownloadCollectorByVersion(response http.ResponseWriter, req } if data, err := os.ReadFile(filepath.Join(s.Config.CollectorsDirectory(), collectorType, fileName)); err != nil { - log.Errorf("Could not open collector file for download: %v", err) + log.Errorf(fmt.Sprintf("Could not open collector file for download: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { api.WriteBinaryResponse(request.Context(), data, fileName, http.StatusOK, response) @@ -115,7 +115,7 @@ func (s *Resources) DownloadCollectorChecksumByVersion(response http.ResponseWri api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("Invalid collector type: %s", collectorType), request), response) } else if releaseTag == "latest" { if collectorManifest, ok := s.CollectorManifests[collectorType]; !ok { - log.Errorf("Manifest doesn't exist for %s collector", collectorType) + log.Errorf(fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) return } else { @@ -126,7 +126,7 @@ func (s *Resources) 
DownloadCollectorChecksumByVersion(response http.ResponseWri } if data, err := os.ReadFile(filepath.Join(s.Config.CollectorsDirectory(), collectorType, fileName)); err != nil { - log.Errorf("Could not open collector file for download: %v", err) + log.Errorf(fmt.Sprintf("Could not open collector file for download: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { api.WriteBinaryResponse(request.Context(), data, fileName, http.StatusOK, response) diff --git a/cmd/api/src/api/v2/cypherquery.go b/cmd/api/src/api/v2/cypherquery.go index bcef391b9e..0091b5c256 100644 --- a/cmd/api/src/api/v2/cypherquery.go +++ b/cmd/api/src/api/v2/cypherquery.go @@ -18,6 +18,7 @@ package v2 import ( "errors" + "fmt" "net/http" "github.com/specterops/bloodhound/dawgs/util" @@ -107,7 +108,7 @@ func (s Resources) cypherMutation(request *http.Request, preparedQuery queries.P if err := s.DB.AppendAuditLog(request.Context(), auditLogEntry); err != nil { // We want to keep err scoped because having info on the mutation graph response trumps this error - log.Errorf("failure to create mutation audit log %s", err.Error()) + log.Errorf(fmt.Sprintf("failure to create mutation audit log %s", err.Error())) } return graphResponse, err diff --git a/cmd/api/src/api/v2/database_wipe.go b/cmd/api/src/api/v2/database_wipe.go index e3769fc98d..3ae1ddc297 100644 --- a/cmd/api/src/api/v2/database_wipe.go +++ b/cmd/api/src/api/v2/database_wipe.go @@ -112,7 +112,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt } else { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf("encountered request analysis for unknown user, this shouldn't happen") + log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-database-wipe" } else { 
userId = user.ID.String() @@ -140,7 +140,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt if kickoffAnalysis { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf("encountered request analysis for unknown user, this shouldn't happen") + log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-database-wipe" } else { userId = user.ID.String() @@ -183,7 +183,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt func (s Resources) deleteHighValueSelectors(ctx context.Context, auditEntry *model.AuditEntry, assetGroupIDs []int) (failure bool) { if err := s.DB.DeleteAssetGroupSelectorsForAssetGroups(ctx, assetGroupIDs); err != nil { - log.Errorf("%s: %s", "there was an error deleting asset group selectors ", err.Error()) + log.Errorf(fmt.Sprintf("%s: %s", "there was an error deleting asset group selectors ", err.Error())) s.handleAuditLogForDatabaseWipe(ctx, auditEntry, false, "high value selectors") return true } else { @@ -195,7 +195,7 @@ func (s Resources) deleteHighValueSelectors(ctx context.Context, auditEntry *mod func (s Resources) deleteFileIngestHistory(ctx context.Context, auditEntry *model.AuditEntry) (failure bool) { if err := s.DB.DeleteAllFileUploads(ctx); err != nil { - log.Errorf("%s: %s", "there was an error deleting file ingest history", err.Error()) + log.Errorf(fmt.Sprintf("%s: %s", "there was an error deleting file ingest history", err.Error())) s.handleAuditLogForDatabaseWipe(ctx, auditEntry, false, "file ingest history") return true } else { @@ -206,7 +206,7 @@ func (s Resources) deleteFileIngestHistory(ctx context.Context, auditEntry *mode func (s Resources) deleteDataQualityHistory(ctx context.Context, auditEntry *model.AuditEntry) (failure bool) { if err := s.DB.DeleteAllDataQuality(ctx); err != nil { - log.Errorf("%s: %s", "there was an error deleting 
data quality history", err.Error()) + log.Errorf(fmt.Sprintf("%s: %s", "there was an error deleting data quality history", err.Error())) s.handleAuditLogForDatabaseWipe(ctx, auditEntry, false, "data quality history") return true } else { @@ -229,6 +229,6 @@ func (s Resources) handleAuditLogForDatabaseWipe(ctx context.Context, auditEntry } if err := s.DB.AppendAuditLog(ctx, *auditEntry); err != nil { - log.Errorf("%s: %s", "error writing to audit log", err.Error()) + log.Errorf(fmt.Sprintf("%s: %s", "error writing to audit log", err.Error())) } } diff --git a/cmd/api/src/api/v2/flag.go b/cmd/api/src/api/v2/flag.go index c07077978b..4abeb09a66 100644 --- a/cmd/api/src/api/v2/flag.go +++ b/cmd/api/src/api/v2/flag.go @@ -64,7 +64,7 @@ func (s Resources) ToggleFlag(response http.ResponseWriter, request *http.Reques if featureFlag.Key == appcfg.FeatureAdcs && !featureFlag.Enabled { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf("encountered request analysis for unknown user, this shouldn't happen") + log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-toggle-flag" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/integration/api.go b/cmd/api/src/api/v2/integration/api.go index c8e2824a74..d1fb70fd2b 100644 --- a/cmd/api/src/api/v2/integration/api.go +++ b/cmd/api/src/api/v2/integration/api.go @@ -18,6 +18,7 @@ package integration import ( "context" + "fmt" "net/http" "time" @@ -94,7 +95,7 @@ func (s *Context) EnableAPI() { } if err := initializer.Launch(s.ctx, false); err != nil { - log.Errorf("Failed launching API server: %v", err) + log.Errorf(fmt.Sprintf("Failed launching API server: %v", err)) } }() } diff --git a/cmd/api/src/auth/model.go b/cmd/api/src/auth/model.go index 819a09b4d7..5b11e0817d 100644 --- a/cmd/api/src/auth/model.go +++ b/cmd/api/src/auth/model.go @@ -163,10 +163,10 @@ func (s Authorizer) 
AuditLogUnauthorizedAccess(request *http.Request) { if request.Method != "GET" { data := model.AuditData{"endpoint": request.Method + " " + request.URL.Path} if auditEntry, err := model.NewAuditEntry(model.AuditLogActionUnauthorizedAccessAttempt, model.AuditLogStatusFailure, data); err != nil { - log.Errorf("Error creating audit log for unauthorized access: %s", err.Error()) + log.Errorf(fmt.Sprintf("Error creating audit log for unauthorized access: %s", err.Error())) return } else if err = s.auditLogger.AppendAuditLog(request.Context(), auditEntry); err != nil { - log.Errorf("Error creating audit log for unauthorized access: %s", err.Error()) + log.Errorf(fmt.Sprintf("Error creating audit log for unauthorized access: %s", err.Error())) } } } diff --git a/cmd/api/src/bootstrap/initializer.go b/cmd/api/src/bootstrap/initializer.go index 47f669d3b2..f55603419b 100644 --- a/cmd/api/src/bootstrap/initializer.go +++ b/cmd/api/src/bootstrap/initializer.go @@ -86,10 +86,10 @@ func (s Initializer[DBType, GraphType]) Launch(parentCtx context.Context, handle } // Log successful start and wait for a signal to exit - log.Infof("Server started successfully") + log.Infof(fmt.Sprintf("Server started successfully")) <-ctx.Done() - log.Infof("Shutting down") + log.Infof(fmt.Sprintf("Shutting down")) // TODO: Refactor this pattern in favor of context handling daemonManager.Stop() diff --git a/cmd/api/src/bootstrap/server.go b/cmd/api/src/bootstrap/server.go index 5ff9050eb6..bc123f11a2 100644 --- a/cmd/api/src/bootstrap/server.go +++ b/cmd/api/src/bootstrap/server.go @@ -115,11 +115,11 @@ func MigrateDB(ctx context.Context, cfg config.Configuration, db database.Databa paddingString := strings.Repeat(" ", len(passwordMsg)-2) borderString := strings.Repeat("#", len(passwordMsg)) - log.Infof("%s", borderString) - log.Infof("#%s#", paddingString) - log.Infof("%s", passwordMsg) - log.Infof("#%s#", paddingString) - log.Infof("%s", borderString) + log.Infof(fmt.Sprintf("%s", borderString)) 
+ log.Infof(fmt.Sprintf("#%s#", paddingString)) + log.Infof(fmt.Sprintf("%s", passwordMsg)) + log.Infof(fmt.Sprintf("#%s#", paddingString)) + log.Infof(fmt.Sprintf("%s", borderString)) } } diff --git a/cmd/api/src/bootstrap/util.go b/cmd/api/src/bootstrap/util.go index 77c4c8a836..8fe3c18d14 100644 --- a/cmd/api/src/bootstrap/util.go +++ b/cmd/api/src/bootstrap/util.go @@ -80,11 +80,11 @@ func ConnectGraph(ctx context.Context, cfg config.Configuration) (*graph.Databas } else { switch driverName { case neo4j.DriverName: - log.Infof("Connecting to graph using Neo4j") + log.Infof(fmt.Sprintf("Connecting to graph using Neo4j")) connectionString = cfg.Neo4J.Neo4jConnectionString() case pg.DriverName: - log.Infof("Connecting to graph using PostgreSQL") + log.Infof(fmt.Sprintf("Connecting to graph using PostgreSQL")) connectionString = cfg.Database.PostgreSQLConnectionString() default: @@ -118,6 +118,6 @@ func InitializeLogging(cfg config.Configuration) error { log.Configure(log.DefaultConfiguration().WithLevel(logLevel)) - log.Infof("Logging configured") + log.Infof(fmt.Sprintf("Logging configured")) return nil } diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index c3b75da053..615d077e02 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -66,7 +66,7 @@ func main() { log.ConfigureDefaults() if cfg, err := config.GetConfiguration(configFilePath, config.NewDefaultConfiguration); err != nil { - log.Fatalf("Unable to read configuration %s: %v", configFilePath, err) + log.Fatalf(fmt.Sprintf("Unable to read configuration %s: %v", configFilePath, err)) } else { initializer := bootstrap.Initializer[*database.BloodhoundDB, *graph.DatabaseSwitch]{ Configuration: cfg, @@ -76,7 +76,7 @@ func main() { } if err := initializer.Launch(context.Background(), true); err != nil { - log.Fatalf("Failed starting the server: %v", err) + log.Fatalf(fmt.Sprintf("Failed starting the server: %v", err)) } } } diff --git 
a/cmd/api/src/config/config.go b/cmd/api/src/config/config.go index a1f38965ea..a7426726bd 100644 --- a/cmd/api/src/config/config.go +++ b/cmd/api/src/config/config.go @@ -246,13 +246,13 @@ func SetValuesFromEnv(varPrefix string, target any, env []string) error { cfgKeyPath := strings.TrimPrefix(key, formattedPrefix) if err := SetValue(target, cfgKeyPath, valueStr); errors.Is(err, ErrInvalidConfigurationPath) { - log.Warnf("%s", err) + log.Warnf(fmt.Sprintf("%s", err)) } else if err != nil { return err } } } else { - log.Errorf("Invalid key/value pair: %+v", kvParts) + log.Errorf(fmt.Sprintf("Invalid key/value pair: %+v", kvParts)) } } @@ -263,11 +263,11 @@ func getConfiguration(path string, defaultConfigFunc func() (Configuration, erro if hasCfgFile, err := HasConfigurationFile(path); err != nil { return Configuration{}, err } else if hasCfgFile { - log.Infof("Reading configuration found at %s", path) + log.Infof(fmt.Sprintf("Reading configuration found at %s", path)) return ReadConfigurationFile(path) } else { - log.Infof("No configuration file found at %s. Returning defaults.", path) + log.Infof(fmt.Sprintf("No configuration file found at %s. 
Returning defaults.", path)) return defaultConfigFunc() } @@ -292,13 +292,13 @@ func (s Configuration) SaveCollectorManifests() (CollectorManifests, error) { manifests := CollectorManifests{} if azureHoundManifest, err := generateCollectorManifest(filepath.Join(s.CollectorsDirectory(), azureHoundCollector)); err != nil { - log.Errorf("Error generating AzureHound manifest file: %s", err) + log.Errorf(fmt.Sprintf("Error generating AzureHound manifest file: %s", err)) } else { manifests[azureHoundCollector] = azureHoundManifest } if sharpHoundManifest, err := generateCollectorManifest(filepath.Join(s.CollectorsDirectory(), sharpHoundCollector)); err != nil { - log.Errorf("Error generating SharpHound manifest file: %s", err) + log.Errorf(fmt.Sprintf("Error generating SharpHound manifest file: %s", err)) } else { manifests[sharpHoundCollector] = sharpHoundManifest } diff --git a/cmd/api/src/daemons/api/bhapi/api.go b/cmd/api/src/daemons/api/bhapi/api.go index 1e1fa15df1..d71a4314a8 100644 --- a/cmd/api/src/daemons/api/bhapi/api.go +++ b/cmd/api/src/daemons/api/bhapi/api.go @@ -19,6 +19,7 @@ package bhapi import ( "context" "errors" + "fmt" "net/http" "github.com/specterops/bloodhound/log" @@ -53,13 +54,13 @@ func (s Daemon) Start(ctx context.Context) { if s.cfg.TLS.Enabled() { if err := s.server.ListenAndServeTLS(s.cfg.TLS.CertFile, s.cfg.TLS.KeyFile); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf("HTTP server listen error: %v", err) + log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) } } } else { if err := s.server.ListenAndServe(); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf("HTTP server listen error: %v", err) + log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) } } } diff --git a/cmd/api/src/daemons/api/toolapi/api.go b/cmd/api/src/daemons/api/toolapi/api.go index 5ec6f75523..77917d157c 100644 --- a/cmd/api/src/daemons/api/toolapi/api.go +++ b/cmd/api/src/daemons/api/toolapi/api.go @@ -19,6 +19,7 
@@ package toolapi import ( "context" "errors" + "fmt" "net/http" "net/http/pprof" @@ -114,13 +115,13 @@ func (s Daemon) Start(ctx context.Context) { if s.cfg.TLS.Enabled() { if err := s.server.ListenAndServeTLS(s.cfg.TLS.CertFile, s.cfg.TLS.KeyFile); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf("HTTP server listen error: %v", err) + log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) } } } else { if err := s.server.ListenAndServe(); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf("HTTP server listen error: %v", err) + log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) } } } diff --git a/cmd/api/src/daemons/daemon.go b/cmd/api/src/daemons/daemon.go index d7cf7acb91..f133c7db1f 100644 --- a/cmd/api/src/daemons/daemon.go +++ b/cmd/api/src/daemons/daemon.go @@ -18,6 +18,7 @@ package daemons import ( "context" + "fmt" "sync" "time" @@ -48,7 +49,7 @@ func (s *Manager) Start(ctx context.Context, daemons ...Daemon) { defer s.daemonsLock.Unlock() for _, daemon := range daemons { - log.Infof("Starting daemon %s", daemon.Name()) + log.Infof(fmt.Sprintf("Starting daemon %s", daemon.Name())) go daemon.Start(ctx) s.daemons = append(s.daemons, daemon) @@ -63,10 +64,10 @@ func (s *Manager) Stop() { defer cancel() for _, daemon := range s.daemons { - log.Infof("Shutting down daemon %s", daemon.Name()) + log.Infof(fmt.Sprintf("Shutting down daemon %s", daemon.Name())) if err := daemon.Stop(shutdownCtx); err != nil { - log.Errorf("Failure caught while shutting down daemon %s: %v", daemon.Name(), err) + log.Errorf(fmt.Sprintf("Failure caught while shutting down daemon %s: %v", daemon.Name(), err)) } } } diff --git a/cmd/api/src/daemons/datapipe/agi.go b/cmd/api/src/daemons/datapipe/agi.go index 406cc85cf0..eed7e69135 100644 --- a/cmd/api/src/daemons/datapipe/agi.go +++ b/cmd/api/src/daemons/datapipe/agi.go @@ -18,6 +18,7 @@ package datapipe import ( "context" + "fmt" "sync" commonanalysis 
"github.com/specterops/bloodhound/analysis" @@ -73,7 +74,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { // log missing tenant IDs for easier debugging for _, tenant := range tenants { if _, err = tenant.Properties.Get(azure.TenantID.String()).String(); err != nil { - log.Errorf("Error getting tenant id for tenant %d: %v", tenant.ID, err) + log.Errorf(fmt.Sprintf("Error getting tenant id for tenant %d: %v", tenant.ID, err)) } } @@ -113,7 +114,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { return nil }); err != nil { - log.Errorf("Failed tagging update: %v", err) + log.Errorf(fmt.Sprintf("Failed tagging update: %v", err)) } }() @@ -126,7 +127,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { for tenant := range tenantC { if roots, err := azureAnalysis.FetchAzureAttackPathRoots(tx, tenant); err != nil { - log.Errorf("Failed fetching roots for tenant %d: %v", tenant.ID, err) + log.Errorf(fmt.Sprintf("Failed fetching roots for tenant %d: %v", tenant.ID, err)) } else { for _, root := range roots { rootsC <- root.ID @@ -136,7 +137,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { return nil }); err != nil { - log.Errorf("Error reading attack path roots for tenants: %v", err) + log.Errorf(fmt.Sprintf("Error reading attack path roots for tenants: %v", err)) } }(workerID) } @@ -213,7 +214,7 @@ func RunAssetGroupIsolationCollections(ctx context.Context, db database.Database for idx, node := range assetGroupNodes { if objectID, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Node %d that does not have valid %s property", node.ID, common.ObjectID) + log.Errorf(fmt.Sprintf("Node %d that does not have valid %s property", node.ID, common.ObjectID)) } else { entries[idx] = model.AssetGroupCollectionEntry{ ObjectID: objectID, diff --git 
a/cmd/api/src/daemons/datapipe/analysis.go b/cmd/api/src/daemons/datapipe/analysis.go index 1a39a3f8b5..9fa8df1274 100644 --- a/cmd/api/src/daemons/datapipe/analysis.go +++ b/cmd/api/src/daemons/datapipe/analysis.go @@ -103,7 +103,7 @@ func RunAnalysisOperations(ctx context.Context, db database.Database, graphDB gr if len(collectedErrors) > 0 { for _, err := range collectedErrors { - log.Errorf("Analysis error encountered: %v", err) + log.Errorf(fmt.Sprintf("Analysis error encountered: %v", err)) } } diff --git a/cmd/api/src/daemons/datapipe/azure_convertors.go b/cmd/api/src/daemons/datapipe/azure_convertors.go index c1373d84b1..5314ee0515 100644 --- a/cmd/api/src/daemons/datapipe/azure_convertors.go +++ b/cmd/api/src/daemons/datapipe/azure_convertors.go @@ -150,7 +150,7 @@ func getKindConverter(kind enums.Kind) func(json.RawMessage, *ConvertedAzureData func convertAzureApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.App if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf("Error deserializing azure application: %v", err) + log.Errorf(fmt.Sprintf("Error deserializing azure application: %v", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAZAppToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAZAppRelationships(data)...) @@ -160,7 +160,7 @@ func convertAzureApp(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureVMScaleSet(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VMScaleSet if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine scale set", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine scale set", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAZVMScaleSetToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAZVMScaleSetRelationships(data)...) 
@@ -171,7 +171,7 @@ func convertAzureVMScaleSetRoleAssignment(raw json.RawMessage, converted *Conver var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine scale set role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine scale set role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVMScaleSetRoleAssignment(data)...) } @@ -183,18 +183,18 @@ func convertAzureAppOwner(raw json.RawMessage, converted *ConvertedAzureData) { ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "app owner", err) + log.Errorf(fmt.Sprintf(SerialError, "app owner", err)) } else { for _, raw := range data.Owners { var ( owner azureModels.DirectoryObject ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(SerialError, "app owner", err) + log.Errorf(fmt.Sprintf(SerialError, "app owner", err)) } else if ownerType, err := ein.ExtractTypeFromDirectoryObject(owner); errors.Is(err, ein.ErrInvalidType) { - log.Warnf(ExtractError, err) + log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(ExtractError, err) + log.Errorf(fmt.Sprintf(ExtractError, err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureOwnerToRel(owner, ownerType, azure.App, data.AppId)) } @@ -206,7 +206,7 @@ func convertAzureAppRoleAssignment(raw json.RawMessage, converted *ConvertedAzur var data models.AppRoleAssignment if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "app role assignment", err) + log.Errorf(fmt.Sprintf(SerialError, "app role assignment", err)) } else if data.AppId == azure.MSGraphAppUniversalID && data.PrincipalType == PrincipalTypeServicePrincipal { converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureAppRoleAssignmentToNodes(data)...) 
if rel := ein.ConvertAzureAppRoleAssignmentToRel(data); rel.IsValid() { @@ -218,7 +218,7 @@ func convertAzureAppRoleAssignment(raw json.RawMessage, converted *ConvertedAzur func convertAzureDevice(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Device if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure device", err) + log.Errorf(fmt.Sprintf(SerialError, "azure device", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAZDeviceToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAZDeviceRelationships(data)...) @@ -230,18 +230,18 @@ func convertAzureDeviceOwner(raw json.RawMessage, converted *ConvertedAzureData) data models.DeviceOwners ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "device owners", err) + log.Errorf(fmt.Sprintf(SerialError, "device owners", err)) } else { for _, raw := range data.Owners { var ( owner azureModels.DirectoryObject ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(SerialError, "device owner", err) + log.Errorf(fmt.Sprintf(SerialError, "device owner", err)) } else if ownerType, err := ein.ExtractTypeFromDirectoryObject(owner); errors.Is(err, ein.ErrInvalidType) { - log.Warnf(ExtractError, err) + log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(ExtractError, err) + log.Errorf(fmt.Sprintf(ExtractError, err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureOwnerToRel(owner, ownerType, azure.Device, data.DeviceId)) } @@ -252,7 +252,7 @@ func convertAzureDeviceOwner(raw json.RawMessage, converted *ConvertedAzureData) func convertAzureFunctionApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.FunctionApp if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure function app", err) + log.Errorf(fmt.Sprintf(SerialError, "azure function app", err)) } else { converted.NodeProps = 
append(converted.NodeProps, ein.ConvertAzureFunctionAppToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAzureFunctionAppToRels(data)...) @@ -263,7 +263,7 @@ func convertAzureFunctionAppRoleAssignment(raw json.RawMessage, converted *Conve var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure function app role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure function app role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureFunctionAppRoleAssignmentToRels(data)...) } @@ -272,7 +272,7 @@ func convertAzureFunctionAppRoleAssignment(raw json.RawMessage, converted *Conve func convertAzureGroup(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Group if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure group", err) + log.Errorf(fmt.Sprintf(SerialError, "azure group", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureGroupToNode(data)) if onPremNode := ein.ConvertAzureGroupToOnPremisesNode(data); onPremNode.IsValid() { @@ -288,7 +288,7 @@ func convertAzureGroupMember(raw json.RawMessage, converted *ConvertedAzureData) ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure group members", err) + log.Errorf(fmt.Sprintf(SerialError, "azure group members", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureGroupMembersToRels(data)...) } @@ -299,7 +299,7 @@ func convertAzureGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) data models.GroupOwners ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure group owners", err) + log.Errorf(fmt.Sprintf(SerialError, "azure group owners", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureGroupOwnerToRels(data)...) 
} @@ -308,7 +308,7 @@ func convertAzureGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) func convertAzureKeyVault(raw json.RawMessage, converted *ConvertedAzureData) { var data models.KeyVault if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure keyvault", err) + log.Errorf(fmt.Sprintf(SerialError, "azure keyvault", err)) } else { node, rel := ein.ConvertAzureKeyVault(data) converted.NodeProps = append(converted.NodeProps, node) @@ -322,7 +322,7 @@ func convertAzureKeyVaultAccessPolicy(raw json.RawMessage, converted *ConvertedA ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure key vault access policy", err) + log.Errorf(fmt.Sprintf(SerialError, "azure key vault access policy", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultAccessPolicy(data)...) } @@ -334,7 +334,7 @@ func convertAzureKeyVaultContributor(raw json.RawMessage, converted *ConvertedAz ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure keyvault contributor", err) + log.Errorf(fmt.Sprintf(SerialError, "azure keyvault contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultContributor(data)...) } @@ -346,7 +346,7 @@ func convertAzureKeyVaultKVContributor(raw json.RawMessage, converted *Converted ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure keyvault kvcontributor", err) + log.Errorf(fmt.Sprintf(SerialError, "azure keyvault kvcontributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultKVContributor(data)...) 
} @@ -358,7 +358,7 @@ func convertAzureKeyVaultOwner(raw json.RawMessage, converted *ConvertedAzureDat ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure keyvault owner", err) + log.Errorf(fmt.Sprintf(SerialError, "azure keyvault owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultOwnerToRels(data)...) } @@ -367,7 +367,7 @@ func convertAzureKeyVaultOwner(raw json.RawMessage, converted *ConvertedAzureDat func convertAzureKeyVaultUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.KeyVaultUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure keyvault user access admin", err) + log.Errorf(fmt.Sprintf(SerialError, "azure keyvault user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultUserAccessAdminToRels(data)...) } @@ -376,7 +376,7 @@ func convertAzureKeyVaultUserAccessAdmin(raw json.RawMessage, converted *Convert func convertAzureManagementGroupDescendant(raw json.RawMessage, converted *ConvertedAzureData) { var data azureModels.DescendantInfo if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure management group descendant list", err) + log.Errorf(fmt.Sprintf(SerialError, "azure management group descendant list", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagementGroupDescendantToRel(data)) } @@ -385,7 +385,7 @@ func convertAzureManagementGroupDescendant(raw json.RawMessage, converted *Conve func convertAzureManagementGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagementGroupOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure management group owner", err) + log.Errorf(fmt.Sprintf(SerialError, "azure management group owner", err)) } else { converted.RelProps = append(converted.RelProps, 
ein.ConvertAzureManagementGroupOwnerToRels(data)...) } @@ -394,7 +394,7 @@ func convertAzureManagementGroupOwner(raw json.RawMessage, converted *ConvertedA func convertAzureManagementGroupUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagementGroupUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure management group user access admin", err) + log.Errorf(fmt.Sprintf(SerialError, "azure management group user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagementGroupUserAccessAdminToRels(data)...) } @@ -403,7 +403,7 @@ func convertAzureManagementGroupUserAccessAdmin(raw json.RawMessage, converted * func convertAzureManagementGroup(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagementGroup if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure management group", err) + log.Errorf(fmt.Sprintf(SerialError, "azure management group", err)) } else { node, rel := ein.ConvertAzureManagementGroup(data) converted.RelProps = append(converted.RelProps, rel) @@ -414,7 +414,7 @@ func convertAzureManagementGroup(raw json.RawMessage, converted *ConvertedAzureD func convertAzureResourceGroup(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ResourceGroup if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure resource group", err) + log.Errorf(fmt.Sprintf(SerialError, "azure resource group", err)) } else { node, rel := ein.ConvertAzureResourceGroup(data) converted.RelProps = append(converted.RelProps, rel) @@ -425,7 +425,7 @@ func convertAzureResourceGroup(raw json.RawMessage, converted *ConvertedAzureDat func convertAzureResourceGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ResourceGroupOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure keyvault", err) + 
log.Errorf(fmt.Sprintf(SerialError, "azure keyvault", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureResourceGroupOwnerToRels(data)...) } @@ -434,7 +434,7 @@ func convertAzureResourceGroupOwner(raw json.RawMessage, converted *ConvertedAzu func convertAzureResourceGroupUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ResourceGroupUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure resource group user access admin", err) + log.Errorf(fmt.Sprintf(SerialError, "azure resource group user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureResourceGroupUserAccessAdminToRels(data)...) } @@ -443,7 +443,7 @@ func convertAzureResourceGroupUserAccessAdmin(raw json.RawMessage, converted *Co func convertAzureRole(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Role if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure role", err) + log.Errorf(fmt.Sprintf(SerialError, "azure role", err)) } else { node, rel := ein.ConvertAzureRole(data) converted.NodeProps = append(converted.NodeProps, node) @@ -454,7 +454,7 @@ func convertAzureRole(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureRoleAssignment(raw json.RawMessage, converted *ConvertedAzureData) { var data models.RoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure role assignment", err) + log.Errorf(fmt.Sprintf(SerialError, "azure role assignment", err)) } else { for _, raw := range data.RoleAssignments { var ( @@ -469,7 +469,7 @@ func convertAzureRoleAssignment(raw json.RawMessage, converted *ConvertedAzureDa func convertAzureServicePrincipal(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ServicePrincipal if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure service principal owner", err) + 
log.Errorf(fmt.Sprintf(SerialError, "azure service principal owner", err)) } else { nodes, rels := ein.ConvertAzureServicePrincipal(data) converted.NodeProps = append(converted.NodeProps, nodes...) @@ -482,7 +482,7 @@ func convertAzureServicePrincipalOwner(raw json.RawMessage, converted *Converted data models.ServicePrincipalOwners ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure service principal owners", err) + log.Errorf(fmt.Sprintf(SerialError, "azure service principal owners", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureServicePrincipalOwnerToRels(data)...) } @@ -491,7 +491,7 @@ func convertAzureServicePrincipalOwner(raw json.RawMessage, converted *Converted func convertAzureSubscription(raw json.RawMessage, converted *ConvertedAzureData) { var data azureModels.Subscription if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure subscription", err) + log.Errorf(fmt.Sprintf(SerialError, "azure subscription", err)) } else { node, rel := ein.ConvertAzureSubscription(data) converted.NodeProps = append(converted.NodeProps, node) @@ -502,7 +502,7 @@ func convertAzureSubscription(raw json.RawMessage, converted *ConvertedAzureData func convertAzureSubscriptionOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.SubscriptionOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure subscription owner", err) + log.Errorf(fmt.Sprintf(SerialError, "azure subscription owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureSubscriptionOwnerToRels(data)...) 
} @@ -511,7 +511,7 @@ func convertAzureSubscriptionOwner(raw json.RawMessage, converted *ConvertedAzur func convertAzureSubscriptionUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.SubscriptionUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure subscription user access admin", err) + log.Errorf(fmt.Sprintf(SerialError, "azure subscription user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureSubscriptionUserAccessAdminToRels(data)...) } @@ -520,7 +520,7 @@ func convertAzureSubscriptionUserAccessAdmin(raw json.RawMessage, converted *Con func convertAzureTenant(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Tenant if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure tenant", err) + log.Errorf(fmt.Sprintf(SerialError, "azure tenant", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureTenantToNode(data)) } @@ -529,7 +529,7 @@ func convertAzureTenant(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureUser(raw json.RawMessage, converted *ConvertedAzureData) { var data models.User if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure user", err) + log.Errorf(fmt.Sprintf(SerialError, "azure user", err)) } else { node, onPremNode, rel := ein.ConvertAzureUser(data) converted.NodeProps = append(converted.NodeProps, node) @@ -543,7 +543,7 @@ func convertAzureUser(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureVirtualMachine(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachine if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine", err)) } else { node, rels := ein.ConvertAzureVirtualMachine(data) converted.NodeProps = append(converted.NodeProps, node) 
@@ -554,7 +554,7 @@ func convertAzureVirtualMachine(raw json.RawMessage, converted *ConvertedAzureDa func convertAzureVirtualMachineAdminLogin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineAdminLogins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine admin login", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine admin login", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineAdminLoginToRels(data)...) } @@ -563,7 +563,7 @@ func convertAzureVirtualMachineAdminLogin(raw json.RawMessage, converted *Conver func convertAzureVirtualMachineAvereContributor(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineAvereContributors if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine avere contributor", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine avere contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineAvereContributorToRels(data)...) } @@ -572,7 +572,7 @@ func convertAzureVirtualMachineAvereContributor(raw json.RawMessage, converted * func convertAzureVirtualMachineContributor(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineContributors if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine contributor", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineContributorToRels(data)...) 
} @@ -581,7 +581,7 @@ func convertAzureVirtualMachineContributor(raw json.RawMessage, converted *Conve func convertAzureVirtualMachineVMContributor(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineVMContributors if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine contributor", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineVMContributorToRels(data)...) } @@ -590,7 +590,7 @@ func convertAzureVirtualMachineVMContributor(raw json.RawMessage, converted *Con func convertAzureVirtualMachineOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine owner", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineOwnerToRels(data)...) } @@ -599,7 +599,7 @@ func convertAzureVirtualMachineOwner(raw json.RawMessage, converted *ConvertedAz func convertAzureVirtualMachineUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure virtual machine user access admin", err) + log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineUserAccessAdminToRels(data)...) 
} @@ -608,7 +608,7 @@ func convertAzureVirtualMachineUserAccessAdmin(raw json.RawMessage, converted *C func convertAzureManagedCluster(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagedCluster if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure managed cluster", err) + log.Errorf(fmt.Sprintf(SerialError, "azure managed cluster", err)) } else { NodeResourceGroupID := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s", data.SubscriptionId, data.Properties.NodeResourceGroup) @@ -622,7 +622,7 @@ func convertAzureManagedClusterRoleAssignment(raw json.RawMessage, converted *Co var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure managed cluster role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure managed cluster role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagedClusterRoleAssignmentToRels(data)...) } @@ -631,7 +631,7 @@ func convertAzureManagedClusterRoleAssignment(raw json.RawMessage, converted *Co func convertAzureContainerRegistry(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ContainerRegistry if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure container registry", err) + log.Errorf(fmt.Sprintf(SerialError, "azure container registry", err)) } else { node, rels := ein.ConvertAzureContainerRegistry(data) converted.NodeProps = append(converted.NodeProps, node) @@ -642,7 +642,7 @@ func convertAzureContainerRegistry(raw json.RawMessage, converted *ConvertedAzur func convertAzureWebApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.WebApp if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure web app", err) + log.Errorf(fmt.Sprintf(SerialError, "azure web app", err)) } else { node, relationships := ein.ConvertAzureWebApp(data) converted.NodeProps = 
append(converted.NodeProps, node) @@ -654,7 +654,7 @@ func convertAzureContainerRegistryRoleAssignment(raw json.RawMessage, converted var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure container registry role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure container registry role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureContainerRegistryRoleAssignment(data)...) } @@ -664,7 +664,7 @@ func convertAzureWebAppRoleAssignment(raw json.RawMessage, converted *ConvertedA var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure web app role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure web app role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureWebAppRoleAssignment(data)...) } @@ -673,7 +673,7 @@ func convertAzureWebAppRoleAssignment(raw json.RawMessage, converted *ConvertedA func convertAzureLogicApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.LogicApp if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure logic app", err) + log.Errorf(fmt.Sprintf(SerialError, "azure logic app", err)) } else { node, relationships := ein.ConvertAzureLogicApp(data) converted.NodeProps = append(converted.NodeProps, node) @@ -685,7 +685,7 @@ func convertAzureLogicAppRoleAssignment(raw json.RawMessage, converted *Converte var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure logic app role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure logic app role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureLogicAppRoleAssignment(data)...) 
} @@ -694,7 +694,7 @@ func convertAzureLogicAppRoleAssignment(raw json.RawMessage, converted *Converte func convertAzureAutomationAccount(raw json.RawMessage, converted *ConvertedAzureData) { var data models.AutomationAccount if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure automation account", err) + log.Errorf(fmt.Sprintf(SerialError, "azure automation account", err)) } else { node, relationships := ein.ConvertAzureAutomationAccount(data) converted.NodeProps = append(converted.NodeProps, node) @@ -706,7 +706,7 @@ func convertAzureAutomationAccountRoleAssignment(raw json.RawMessage, converted var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(SerialError, "azure automation account role assignments", err) + log.Errorf(fmt.Sprintf(SerialError, "azure automation account role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureAutomationAccountRoleAssignment(data)...) 
} diff --git a/cmd/api/src/daemons/datapipe/cleanup.go b/cmd/api/src/daemons/datapipe/cleanup.go index f7c222c349..65578e84aa 100644 --- a/cmd/api/src/daemons/datapipe/cleanup.go +++ b/cmd/api/src/daemons/datapipe/cleanup.go @@ -20,6 +20,7 @@ package datapipe import ( "context" + "fmt" "os" "path/filepath" "strings" @@ -79,11 +80,11 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin // Release the lock once finished defer s.lock.Unlock() - log.Infof("Running OrphanFileSweeper for path %s", s.tempDirectoryRootPath) - log.Debugf("OrphanFileSweeper expected names %v", expectedFileNames) + log.Infof(fmt.Sprintf("Running OrphanFileSweeper for path %s", s.tempDirectoryRootPath)) + log.Debugf(fmt.Sprintf("OrphanFileSweeper expected names %v", expectedFileNames)) if dirEntries, err := s.fileOps.ReadDir(s.tempDirectoryRootPath); err != nil { - log.Errorf("Failed reading work directory %s: %v", s.tempDirectoryRootPath, err) + log.Errorf(fmt.Sprintf("Failed reading work directory %s: %v", s.tempDirectoryRootPath, err)) } else { numDeleted := 0 @@ -93,13 +94,13 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin if expectedDir != "" { expectedDir = strings.TrimSuffix(expectedDir, string(filepath.Separator)) if expectedDir != s.tempDirectoryRootPath { - log.Warnf("directory '%s' for expectedFileName '%s' does not match tempDirectoryRootPath '%s': skipping", expectedDir, expectedFileName, s.tempDirectoryRootPath) + log.Warnf(fmt.Sprintf("directory '%s' for expectedFileName '%s' does not match tempDirectoryRootPath '%s': skipping", expectedDir, expectedFileName, s.tempDirectoryRootPath)) continue } } for idx, dirEntry := range dirEntries { if expectedFN == dirEntry.Name() { - log.Debugf("skipping expected file %s", expectedFN) + log.Debugf(fmt.Sprintf("skipping expected file %s", expectedFN)) dirEntries = append(dirEntries[:idx], dirEntries[idx+1:]...) 
} } @@ -111,18 +112,18 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin break } - log.Infof("Removing orphaned file %s", orphanedDirEntry.Name()) + log.Infof(fmt.Sprintf("Removing orphaned file %s", orphanedDirEntry.Name())) fullPath := filepath.Join(s.tempDirectoryRootPath, orphanedDirEntry.Name()) if err := s.fileOps.RemoveAll(fullPath); err != nil { - log.Errorf("Failed removing orphaned file %s: %v", fullPath, err) + log.Errorf(fmt.Sprintf("Failed removing orphaned file %s: %v", fullPath, err)) } numDeleted += 1 } if numDeleted > 0 { - log.Infof("Finished removing %d orphaned ingest files", numDeleted) + log.Infof(fmt.Sprintf("Finished removing %d orphaned ingest files", numDeleted)) } } } diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 7da281cb85..0f50e36f43 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -19,6 +19,7 @@ package datapipe import ( "context" "errors" + "fmt" "time" "github.com/specterops/bloodhound/cache" @@ -66,7 +67,7 @@ func (s *Daemon) analyze() { // Ensure that the user-requested analysis switch is deleted. This is done at the beginning of the // function so that any re-analysis requests are caught while analysis is in-progress. 
if err := s.db.DeleteAnalysisRequest(s.ctx); err != nil { - log.Errorf("Error deleting analysis request: %v", err) + log.Errorf(fmt.Sprintf("Error deleting analysis request: %v", err)) return } @@ -75,7 +76,7 @@ func (s *Daemon) analyze() { } if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusAnalyzing, false); err != nil { - log.Errorf("Error setting datapipe status: %v", err) + log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) return } @@ -85,14 +86,14 @@ func (s *Daemon) analyze() { if errors.Is(err, ErrAnalysisFailed) { FailAnalyzedFileUploadJobs(s.ctx, s.db) if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, false); err != nil { - log.Errorf("Error setting datapipe status: %v", err) + log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) return } } else if errors.Is(err, ErrAnalysisPartiallyCompleted) { PartialCompleteFileUploadJobs(s.ctx, s.db) if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, true); err != nil { - log.Errorf("Error setting datapipe status: %v", err) + log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) return } } @@ -100,13 +101,13 @@ func (s *Daemon) analyze() { CompleteAnalyzedFileUploadJobs(s.ctx, s.db) if entityPanelCachingFlag, err := s.db.GetFlagByKey(s.ctx, appcfg.FeatureEntityPanelCaching); err != nil { - log.Errorf("Error retrieving entity panel caching flag: %v", err) + log.Errorf(fmt.Sprintf("Error retrieving entity panel caching flag: %v", err)) } else { resetCache(s.cache, entityPanelCachingFlag.Enabled) } if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, true); err != nil { - log.Errorf("Error setting datapipe status: %v", err) + log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) return } } @@ -114,15 +115,15 @@ func (s *Daemon) analyze() { func resetCache(cacher cache.Cache, _ bool) { if err := cacher.Reset(); err != nil { - log.Errorf("Error while resetting the cache: %v", err) + log.Errorf(fmt.Sprintf("Error while 
resetting the cache: %v", err)) } else { - log.Infof("Cache successfully reset by datapipe daemon") + log.Infof(fmt.Sprintf("Cache successfully reset by datapipe daemon")) } } func (s *Daemon) ingestAvailableTasks() { if ingestTasks, err := s.db.GetAllIngestTasks(s.ctx); err != nil { - log.Errorf("Failed fetching available ingest tasks: %v", err) + log.Errorf(fmt.Sprintf("Failed fetching available ingest tasks: %v", err)) } else { s.processIngestTasks(s.ctx, ingestTasks) } @@ -160,7 +161,7 @@ func (s *Daemon) Start(ctx context.Context) { // If there are completed file upload jobs or if analysis was user-requested, perform analysis. if hasJobsWaitingForAnalysis, err := HasFileUploadJobsWaitingForAnalysis(s.ctx, s.db); err != nil { - log.Errorf("Failed looking up jobs waiting for analysis: %v", err) + log.Errorf(fmt.Sprintf("Failed looking up jobs waiting for analysis: %v", err)) } else if hasJobsWaitingForAnalysis || s.db.HasAnalysisRequest(s.ctx) { s.analyze() } @@ -182,18 +183,18 @@ func (s *Daemon) deleteData() { defer log.Measure(log.LevelInfo, "Purge Graph Data Completed")() if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusPurging, false); err != nil { - log.Errorf("Error setting datapipe status: %v", err) + log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) return } - log.Infof("Begin Purge Graph Data") + log.Infof(fmt.Sprintf("Begin Purge Graph Data")) if err := s.db.CancelAllFileUploads(s.ctx); err != nil { - log.Errorf("Error cancelling jobs during data deletion: %v", err) + log.Errorf(fmt.Sprintf("Error cancelling jobs during data deletion: %v", err)) } else if err := s.db.DeleteAllIngestTasks(s.ctx); err != nil { - log.Errorf("Error deleting ingest tasks during data deletion: %v", err) + log.Errorf(fmt.Sprintf("Error deleting ingest tasks during data deletion: %v", err)) } else if err := DeleteCollectedGraphData(s.ctx, s.graphdb); err != nil { - log.Errorf("Error deleting graph data: %v", err) + log.Errorf(fmt.Sprintf("Error 
deleting graph data: %v", err)) } } @@ -203,7 +204,7 @@ func (s *Daemon) Stop(ctx context.Context) error { func (s *Daemon) clearOrphanedData() { if ingestTasks, err := s.db.GetAllIngestTasks(s.ctx); err != nil { - log.Errorf("Failed fetching available file upload ingest tasks: %v", err) + log.Errorf(fmt.Sprintf("Failed fetching available file upload ingest tasks: %v", err)) } else { expectedFiles := make([]string, len(ingestTasks)) diff --git a/cmd/api/src/daemons/datapipe/decoders.go b/cmd/api/src/daemons/datapipe/decoders.go index 5ba329c6e3..b683947d0d 100644 --- a/cmd/api/src/daemons/datapipe/decoders.go +++ b/cmd/api/src/daemons/datapipe/decoders.go @@ -18,6 +18,7 @@ package datapipe import ( "errors" + "fmt" "io" "github.com/specterops/bloodhound/dawgs/graph" @@ -49,7 +50,7 @@ func decodeBasicData[T any](batch graph.Batch, reader io.ReadSeeker, conversionF // This variable needs to be initialized here, otherwise the marshaller will cache the map in the struct var decodeTarget T if err := decoder.Decode(&decodeTarget); err != nil { - log.Errorf("Error decoding %T object: %v", decodeTarget, err) + log.Errorf(fmt.Sprintf("Error decoding %T object: %v", decodeTarget, err)) if errors.Is(err, io.EOF) { break } @@ -93,7 +94,7 @@ func decodeGroupData(batch graph.Batch, reader io.ReadSeeker) error { for decoder.More() { var group ein.Group if err = decoder.Decode(&group); err != nil { - log.Errorf("Error decoding group object: %v", err) + log.Errorf(fmt.Sprintf("Error decoding group object: %v", err)) if errors.Is(err, io.EOF) { break } @@ -135,7 +136,7 @@ func decodeSessionData(batch graph.Batch, reader io.ReadSeeker) error { for decoder.More() { var session ein.Session if err = decoder.Decode(&session); err != nil { - log.Errorf("Error decoding session object: %v", err) + log.Errorf(fmt.Sprintf("Error decoding session object: %v", err)) if errors.Is(err, io.EOF) { break } @@ -177,7 +178,7 @@ func decodeAzureData(batch graph.Batch, reader io.ReadSeeker) error { for 
decoder.More() { var data AzureBase if err = decoder.Decode(&data); err != nil { - log.Errorf("Error decoding azure object: %v", err) + log.Errorf(fmt.Sprintf("Error decoding azure object: %v", err)) if errors.Is(err, io.EOF) { break } diff --git a/cmd/api/src/daemons/datapipe/ingest.go b/cmd/api/src/daemons/datapipe/ingest.go index 2823fdbc56..860f4fab42 100644 --- a/cmd/api/src/daemons/datapipe/ingest.go +++ b/cmd/api/src/daemons/datapipe/ingest.go @@ -145,7 +145,7 @@ func NormalizeEinNodeProperties(properties map[string]any, objectID string, nowU if name, typeMatches := rawName.(string); typeMatches { properties[common.Name.String()] = strings.ToUpper(name) } else { - log.Errorf("Bad type found for node name property during ingest. Expected string, got %T", rawName) + log.Errorf(fmt.Sprintf("Bad type found for node name property during ingest. Expected string, got %T", rawName)) } } @@ -153,7 +153,7 @@ func NormalizeEinNodeProperties(properties map[string]any, objectID string, nowU if os, typeMatches := rawOS.(string); typeMatches { properties[common.OperatingSystem.String()] = strings.ToUpper(os) } else { - log.Errorf("Bad type found for node operating system property during ingest. Expected string, got %T", rawOS) + log.Errorf(fmt.Sprintf("Bad type found for node operating system property during ingest. Expected string, got %T", rawOS)) } } @@ -161,7 +161,7 @@ func NormalizeEinNodeProperties(properties map[string]any, objectID string, nowU if dn, typeMatches := rawDN.(string); typeMatches { properties[ad.DistinguishedName.String()] = strings.ToUpper(dn) } else { - log.Errorf("Bad type found for node distinguished name property during ingest. Expected string, got %T", rawDN) + log.Errorf(fmt.Sprintf("Bad type found for node distinguished name property during ingest. 
Expected string, got %T", rawDN)) } } @@ -188,7 +188,7 @@ func IngestNodes(batch graph.Batch, identityKind graph.Kind, nodes []ein.Ingesti for _, next := range nodes { if err := IngestNode(batch, nowUTC, identityKind, next); err != nil { - log.Errorf("Error ingesting node ID %s: %v", next.ObjectID, err) + log.Errorf(fmt.Sprintf("Error ingesting node ID %s: %v", next.ObjectID, err)) errs.Add(err) } } @@ -231,7 +231,7 @@ func IngestRelationships(batch graph.Batch, nodeIDKind graph.Kind, relationships for _, next := range relationships { if err := IngestRelationship(batch, nowUTC, nodeIDKind, next); err != nil { - log.Errorf("Error ingesting relationship from %s to %s : %v", next.Source, next.Target, err) + log.Errorf(fmt.Sprintf("Error ingesting relationship from %s to %s : %v", next.Source, next.Target, err)) errs.Add(err) } } @@ -274,7 +274,7 @@ func IngestDNRelationships(batch graph.Batch, relationships []ein.IngestibleRela for _, next := range relationships { if err := ingestDNRelationship(batch, nowUTC, next); err != nil { - log.Errorf("Error ingesting relationship: %v", err) + log.Errorf(fmt.Sprintf("Error ingesting relationship: %v", err)) errs.Add(err) } } @@ -319,7 +319,7 @@ func IngestSessions(batch graph.Batch, sessions []ein.IngestibleSession) error { for _, next := range sessions { if err := ingestSession(batch, nowUTC, next); err != nil { - log.Errorf("Error ingesting sessions: %v", err) + log.Errorf(fmt.Sprintf("Error ingesting sessions: %v", err)) errs.Add(err) } } diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index 29452fade5..bb1f7fb86d 100644 --- a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -51,11 +51,11 @@ func FailAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusAnalyzing); err != nil { - log.Errorf("Failed to load file upload jobs under analysis: %v", 
err) + log.Errorf(fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) } else { for _, job := range fileUploadJobsUnderAnalysis { if err := fileupload.UpdateFileUploadJobStatus(ctx, db, job, model.JobStatusFailed, "Analysis failed"); err != nil { - log.Errorf("Failed updating file upload job %d to failed status: %v", job.ID, err) + log.Errorf(fmt.Sprintf("Failed updating file upload job %d to failed status: %v", job.ID, err)) } } } @@ -69,11 +69,11 @@ func PartialCompleteFileUploadJobs(ctx context.Context, db database.Database) { } if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusAnalyzing); err != nil { - log.Errorf("Failed to load file upload jobs under analysis: %v", err) + log.Errorf(fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) } else { for _, job := range fileUploadJobsUnderAnalysis { if err := fileupload.UpdateFileUploadJobStatus(ctx, db, job, model.JobStatusPartiallyComplete, "Partially Completed"); err != nil { - log.Errorf("Failed updating file upload job %d to partially completed status: %v", job.ID, err) + log.Errorf(fmt.Sprintf("Failed updating file upload job %d to partially completed status: %v", job.ID, err)) } } } @@ -87,7 +87,7 @@ func CompleteAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusAnalyzing); err != nil { - log.Errorf("Failed to load file upload jobs under analysis: %v", err) + log.Errorf(fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) } else { for _, job := range fileUploadJobsUnderAnalysis { var ( @@ -106,7 +106,7 @@ func CompleteAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } if err := fileupload.UpdateFileUploadJobStatus(ctx, db, job, status, message); err != nil { - log.Errorf("Error updating file upload job %d: %v", job.ID, err) + log.Errorf(fmt.Sprintf("Error updating file upload job %d: 
%v", job.ID, err)) } } } @@ -120,14 +120,14 @@ func ProcessIngestedFileUploadJobs(ctx context.Context, db database.Database) { } if ingestingFileUploadJobs, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusIngesting); err != nil { - log.Errorf("Failed to look up finished file upload jobs: %v", err) + log.Errorf(fmt.Sprintf("Failed to look up finished file upload jobs: %v", err)) } else { for _, ingestingFileUploadJob := range ingestingFileUploadJobs { if remainingIngestTasks, err := db.GetIngestTasksForJob(ctx, ingestingFileUploadJob.ID); err != nil { - log.Errorf("Failed looking up remaining ingest tasks for file upload job %d: %v", ingestingFileUploadJob.ID, err) + log.Errorf(fmt.Sprintf("Failed looking up remaining ingest tasks for file upload job %d: %v", ingestingFileUploadJob.ID, err)) } else if len(remainingIngestTasks) == 0 { if err := fileupload.UpdateFileUploadJobStatus(ctx, db, ingestingFileUploadJob, model.JobStatusAnalyzing, "Analyzing"); err != nil { - log.Errorf("Error updating fileupload job %d: %v", ingestingFileUploadJob.ID, err) + log.Errorf(fmt.Sprintf("Error updating fileupload job %d: %v", ingestingFileUploadJob.ID, err)) } } } @@ -137,7 +137,7 @@ func ProcessIngestedFileUploadJobs(ctx context.Context, db database.Database) { // clearFileTask removes a generic file upload task for ingested data. 
func (s *Daemon) clearFileTask(ingestTask model.IngestTask) { if err := s.db.DeleteIngestTask(s.ctx, ingestTask); err != nil { - log.Errorf("Error removing file upload task from db: %v", err) + log.Errorf(fmt.Sprintf("Error removing file upload task from db: %v", err)) } } @@ -184,9 +184,9 @@ func (s *Daemon) preProcessIngestFile(path string, fileType model.FileType) ([]s //Close the archive and delete it if err := archive.Close(); err != nil { - log.Errorf("Error closing archive %s: %v", path, err) + log.Errorf(fmt.Sprintf("Error closing archive %s: %v", path, err)) } else if err := os.Remove(path); err != nil { - log.Errorf("Error deleting archive %s: %v", path, err) + log.Errorf(fmt.Sprintf("Error deleting archive %s: %v", path, err)) } return filePaths, failed, errs.Combined() @@ -198,7 +198,7 @@ func (s *Daemon) preProcessIngestFile(path string, fileType model.FileType) ([]s func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType model.FileType) (int, int, error) { adcsEnabled := false if adcsFlag, err := s.db.GetFlagByKey(ctx, appcfg.FeatureAdcs); err != nil { - log.Errorf("Error getting ADCS flag: %v", err) + log.Errorf(fmt.Sprintf("Error getting ADCS flag: %v", err)) } else { adcsEnabled = adcsFlag.Enabled } @@ -215,15 +215,15 @@ func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType mo return err } else if err := ReadFileForIngest(batch, file, adcsEnabled); err != nil { failed++ - log.Errorf("Error reading ingest file %s: %v", filePath, err) + log.Errorf(fmt.Sprintf("Error reading ingest file %s: %v", filePath, err)) } if err := file.Close(); err != nil { - log.Errorf("Error closing ingest file %s: %v", filePath, err) + log.Errorf(fmt.Sprintf("Error closing ingest file %s: %v", filePath, err)) } else if err := os.Remove(filePath); errors.Is(err, fs.ErrNotExist) { - log.Warnf("Removing ingest file %s: %w", filePath, err) + log.Warnf(fmt.Sprintf("Removing ingest file %s: %v", filePath, err)) } else if err != nil { 
- log.Errorf("Error removing ingest file %s: %v", filePath, err) + log.Errorf(fmt.Sprintf("Error removing ingest file %s: %v", filePath, err)) } } @@ -235,7 +235,7 @@ func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType mo // processIngestTasks covers the generic file upload case for ingested data. func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.IngestTasks) { if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIngesting, false); err != nil { - log.Errorf("Error setting datapipe status: %v", err) + log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) return } defer s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, false) @@ -248,22 +248,22 @@ func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.Inges } if s.cfg.DisableIngest { - log.Warnf("Skipped processing of ingestTasks due to config flag.") + log.Warnf(fmt.Sprintf("Skipped processing of ingestTasks due to config flag.")) return } total, failed, err := s.processIngestFile(ctx, ingestTask.FileName, ingestTask.FileType) if errors.Is(err, fs.ErrNotExist) { - log.Warnf("Did not process ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err) + log.Warnf(fmt.Sprintf("Did not process ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) } else if err != nil { - log.Errorf("Failed processing ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err) + log.Errorf(fmt.Sprintf("Failed processing ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) } else if job, err := s.db.GetFileUploadJob(ctx, ingestTask.TaskID.ValueOrZero()); err != nil { - log.Errorf("Failed to fetch job for ingest task %d: %v", ingestTask.ID, err) + log.Errorf(fmt.Sprintf("Failed to fetch job for ingest task %d: %v", ingestTask.ID, err)) } else { job.TotalFiles = total job.FailedFiles += failed if err = s.db.UpdateFileUploadJob(ctx, job); err != nil { - log.Errorf("Failed 
to update number of failed files for file upload job ID %s: %v", job.ID, err) + log.Errorf(fmt.Sprintf("Failed to update number of failed files for file upload job ID %s: %v", job.ID, err)) } } diff --git a/cmd/api/src/database/analysisrequest.go b/cmd/api/src/database/analysisrequest.go index e0f5c2eb61..f3306f1306 100644 --- a/cmd/api/src/database/analysisrequest.go +++ b/cmd/api/src/database/analysisrequest.go @@ -19,6 +19,7 @@ package database import ( "context" "errors" + "fmt" "time" "github.com/specterops/bloodhound/log" @@ -52,7 +53,7 @@ func (s *BloodhoundDB) HasAnalysisRequest(ctx context.Context) bool { tx := s.db.WithContext(ctx).Raw(`select exists(select * from analysis_request_switch where request_type = ? limit 1);`, model.AnalysisRequestAnalysis).Scan(&exists) if tx.Error != nil { - log.Errorf("Error determining if there's an analysis request: %v", tx.Error) + log.Errorf(fmt.Sprintf("Error determining if there's an analysis request: %v", tx.Error)) } return exists } @@ -62,7 +63,7 @@ func (s *BloodhoundDB) HasCollectedGraphDataDeletionRequest(ctx context.Context) tx := s.db.WithContext(ctx).Raw(`select exists(select * from analysis_request_switch where request_type = ? 
limit 1);`, model.AnalysisRequestDeletion).Scan(&exists) if tx.Error != nil { - log.Errorf("Error determining if there's a deletion request: %v", tx.Error) + log.Errorf(fmt.Sprintf("Error determining if there's a deletion request: %v", tx.Error)) } return exists } @@ -92,12 +93,12 @@ func (s *BloodhoundDB) setAnalysisRequest(ctx context.Context, requestType model // RequestAnalysis will request an analysis be executed, as long as there isn't an existing analysis request or collected graph data deletion request, then it no-ops func (s *BloodhoundDB) RequestAnalysis(ctx context.Context, requestedBy string) error { - log.Infof("Analysis requested by %s", requestedBy) + log.Infof(fmt.Sprintf("Analysis requested by %s", requestedBy)) return s.setAnalysisRequest(ctx, model.AnalysisRequestAnalysis, requestedBy) } // RequestCollectedGraphDataDeletion will request collected graph data be deleted, if an analysis request is present, it will overwrite that. func (s *BloodhoundDB) RequestCollectedGraphDataDeletion(ctx context.Context, requestedBy string) error { - log.Infof("Collected graph data deletion requested by %s", requestedBy) + log.Infof(fmt.Sprintf("Collected graph data deletion requested by %s", requestedBy)) return s.setAnalysisRequest(ctx, model.AnalysisRequestDeletion, requestedBy) } diff --git a/cmd/api/src/database/db.go b/cmd/api/src/database/db.go index 292665c68c..0253ce48fe 100644 --- a/cmd/api/src/database/db.go +++ b/cmd/api/src/database/db.go @@ -170,9 +170,9 @@ type BloodhoundDB struct { func (s *BloodhoundDB) Close(ctx context.Context) { if sqlDBRef, err := s.db.WithContext(ctx).DB(); err != nil { - log.Errorf("Failed to fetch SQL DB reference from GORM: %v", err) + log.Errorf(fmt.Sprintf("Failed to fetch SQL DB reference from GORM: %v", err)) } else if err := sqlDBRef.Close(); err != nil { - log.Errorf("Failed closing database: %v", err) + log.Errorf(fmt.Sprintf("Failed closing database: %v", err)) } } @@ -240,7 +240,7 @@ func (s *BloodhoundDB) 
Wipe(ctx context.Context) error { func (s *BloodhoundDB) Migrate(ctx context.Context) error { // Run the migrator if err := migration.NewMigrator(s.db.WithContext(ctx)).ExecuteStepwiseMigrations(); err != nil { - log.Errorf("Error during SQL database migration phase: %v", err) + log.Errorf(fmt.Sprintf("Error during SQL database migration phase: %v", err)) return err } diff --git a/cmd/api/src/database/log.go b/cmd/api/src/database/log.go index 2a5a3ccec5..55815253be 100644 --- a/cmd/api/src/database/log.go +++ b/cmd/api/src/database/log.go @@ -19,6 +19,7 @@ package database import ( "context" "errors" + "fmt" "time" "github.com/specterops/bloodhound/log" @@ -72,7 +73,7 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( sql, rows := fc() if log.GlobalAccepts(log.LevelDebug) { - log.Errorf("Slow database query took %d ms addressing %d rows: %s", elapsed.Milliseconds(), rows, sql) + log.Errorf(fmt.Sprintf("Slow database query took %d ms addressing %d rows: %s", elapsed.Milliseconds(), rows, sql)) } else { log.Error().Stack().Msgf("Slow database query took %d ms addressing %d rows.", elapsed.Milliseconds(), rows) } @@ -80,7 +81,7 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( sql, rows := fc() if log.GlobalAccepts(log.LevelDebug) { - log.Warnf("Slow database query took %d ms addressing %d rows: %s", elapsed.Milliseconds(), rows, sql) + log.Warnf(fmt.Sprintf("Slow database query took %d ms addressing %d rows: %s", elapsed.Milliseconds(), rows, sql)) } else { log.Warn().Stack().Msgf("Slow database query took %d ms addressing %d rows.", elapsed.Milliseconds(), rows) } diff --git a/cmd/api/src/database/migration/stepwise.go b/cmd/api/src/database/migration/stepwise.go index bada240ce9..24e241663e 100644 --- a/cmd/api/src/database/migration/stepwise.go +++ b/cmd/api/src/database/migration/stepwise.go @@ -49,7 +49,7 @@ func (s *Migrator) ExecuteMigrations(manifest Manifest) error { } // execute the 
migration(s) for this version in a transaction - log.Infof("Executing SQL migrations for %s", versionString) + log.Infof(fmt.Sprintf("Executing SQL migrations for %s", versionString)) if err := s.DB.Transaction(func(tx *gorm.DB) error { for _, migration := range manifest.Migrations[versionString] { @@ -113,7 +113,7 @@ ALTER TABLE ONLY migrations ALTER COLUMN id SET DEFAULT nextval('migrations_id_s ALTER TABLE ONLY migrations ADD CONSTRAINT migrations_pkey PRIMARY KEY (id);` ) - log.Infof("Creating migration schema...") + log.Infof(fmt.Sprintf("Creating migration schema...")) if err := s.DB.Transaction(func(tx *gorm.DB) error { if result := tx.Exec(createMigrationTableSql); result.Error != nil { return fmt.Errorf("failed to creation migration table: %w", result.Error) @@ -167,7 +167,7 @@ func (s *Migrator) ExecuteStepwiseMigrations() error { return fmt.Errorf("failed to check if migration table exists: %w", err) } else if !hasTable { // no migration table, assume this is new installation - log.Infof("This is a new SQL database. Initializing schema...") + log.Infof(fmt.Sprintf("This is a new SQL database. 
Initializing schema...")) //initialize migration schema and generate full manifest if err = s.CreateMigrationSchema(); err != nil { return fmt.Errorf("failed to create migration schema: %w", err) @@ -185,7 +185,7 @@ func (s *Migrator) ExecuteStepwiseMigrations() error { // run migrations using the manifest we generated if len(manifest.VersionTable) == 0 { - log.Infof("No new SQL migrations to run") + log.Infof(fmt.Sprintf("No new SQL migrations to run")) return nil } else if err := s.ExecuteMigrations(manifest); err != nil { return fmt.Errorf("could not execute migrations: %w", err) diff --git a/cmd/api/src/migrations/graph.go b/cmd/api/src/migrations/graph.go index 0eddd0e8e6..c0158c267a 100644 --- a/cmd/api/src/migrations/graph.go +++ b/cmd/api/src/migrations/graph.go @@ -84,16 +84,16 @@ func GetMigrationData(ctx context.Context, db graph.Database) (version.Version, return err }); err != nil { - log.Warnf("Unable to fetch migration data from graph: %v", err) + log.Warnf(fmt.Sprintf("Unable to fetch migration data from graph: %v", err)) return currentMigration, ErrNoMigrationData } else if major, err := node.Properties.Get("Major").Int(); err != nil { - log.Warnf("Unable to get Major property from migration data node: %v", err) + log.Warnf(fmt.Sprintf("Unable to get Major property from migration data node: %v", err)) return currentMigration, ErrNoMigrationData } else if minor, err := node.Properties.Get("Minor").Int(); err != nil { - log.Warnf("unable to get Minor property from migration data node: %v", err) + log.Warnf(fmt.Sprintf("unable to get Minor property from migration data node: %v", err)) return currentMigration, ErrNoMigrationData } else if patch, err := node.Properties.Get("Patch").Int(); err != nil { - log.Warnf("unable to get Patch property from migration data node: %v", err) + log.Warnf(fmt.Sprintf("unable to get Patch property from migration data node: %v", err)) return currentMigration, ErrNoMigrationData } else { currentMigration.Major = major @@ 
-144,13 +144,13 @@ func (s *GraphMigrator) executeMigrations(ctx context.Context, originalVersion v for _, nextMigration := range Manifest { if nextMigration.Version.GreaterThan(mostRecentVersion) { - log.Infof("Graph migration version %s is greater than current version %s", nextMigration.Version, mostRecentVersion) + log.Infof(fmt.Sprintf("Graph migration version %s is greater than current version %s", nextMigration.Version, mostRecentVersion)) if err := nextMigration.Execute(s.db); err != nil { return fmt.Errorf("migration version %s failed: %w", nextMigration.Version.String(), err) } - log.Infof("Graph migration version %s executed successfully", nextMigration.Version) + log.Infof(fmt.Sprintf("Graph migration version %s executed successfully", nextMigration.Version)) mostRecentVersion = nextMigration.Version } } @@ -167,7 +167,7 @@ func (s *GraphMigrator) executeStepwiseMigrations(ctx context.Context) error { if errors.Is(err, ErrNoMigrationData) { currentVersion := version.GetVersion() - log.Infof("This is a new graph database. Creating a migration entry for GraphDB version %s", currentVersion) + log.Infof(fmt.Sprintf("This is a new graph database. 
Creating a migration entry for GraphDB version %s", currentVersion)) return CreateMigrationData(ctx, s.db, currentMigration) } else { return fmt.Errorf("unable to get graph db migration data: %w", err) diff --git a/cmd/api/src/migrations/manifest.go b/cmd/api/src/migrations/manifest.go index 98e3df5075..67d309c7d1 100644 --- a/cmd/api/src/migrations/manifest.go +++ b/cmd/api/src/migrations/manifest.go @@ -138,7 +138,7 @@ func Version_513_Migration(db graph.Database) error { } } - log.Infof("Migration removed all non-entity kinds from %d incorrectly labeled nodes", nodes.Len()) + log.Infof(fmt.Sprintf("Migration removed all non-entity kinds from %d incorrectly labeled nodes", nodes.Len())) return nil }); err != nil { return err @@ -193,7 +193,7 @@ func Version_277_Migration(db graph.Database) error { var dirty = false if objectId, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Error getting objectid for node %d: %v", node.ID, err) + log.Errorf(fmt.Sprintf("Error getting objectid for node %d: %v", node.ID, err)) continue } else if objectId != strings.ToUpper(objectId) { dirty = true @@ -224,7 +224,7 @@ func Version_277_Migration(db graph.Database) error { } else if node.Kinds.ContainsOneOf(azure.Entity) { identityKind = azure.Entity } else { - log.Errorf("Unable to figure out base kind of node %d", node.ID) + log.Errorf(fmt.Sprintf("Unable to figure out base kind of node %d", node.ID)) } if identityKind != nil { @@ -233,17 +233,17 @@ func Version_277_Migration(db graph.Database) error { IdentityKind: identityKind, IdentityProperties: []string{common.ObjectID.String()}, }); err != nil { - log.Errorf("Error updating node %d: %v", node.ID, err) + log.Errorf(fmt.Sprintf("Error updating node %d: %v", node.ID, err)) } } } if count++; count%10000 == 0 { - log.Infof("Completed %d nodes in migration", count) + log.Infof(fmt.Sprintf("Completed %d nodes in migration", count)) } } - log.Infof("Completed %d nodes in migration", count) + 
log.Infof(fmt.Sprintf("Completed %d nodes in migration", count)) return cursor.Error() }); err != nil { return err diff --git a/cmd/api/src/model/appcfg/parameter.go b/cmd/api/src/model/appcfg/parameter.go index e4d63a6468..730dce676e 100644 --- a/cmd/api/src/model/appcfg/parameter.go +++ b/cmd/api/src/model/appcfg/parameter.go @@ -170,10 +170,10 @@ func GetPasswordExpiration(ctx context.Context, service ParameterService) time.D var expiration PasswordExpiration if cfg, err := service.GetConfigurationParameter(ctx, PasswordExpirationWindow); err != nil { - log.Warnf("Failed to fetch password expiratio configuration; returning default values") + log.Warnf(fmt.Sprintf("Failed to fetch password expiratio configuration; returning default values")) return DefaultPasswordExpirationWindow } else if err := cfg.Map(&expiration); err != nil { - log.Warnf("Invalid password expiration configuration supplied; returning default values") + log.Warnf(fmt.Sprintf("Invalid password expiration configuration supplied; returning default values")) return DefaultPasswordExpirationWindow } @@ -194,9 +194,9 @@ func GetNeo4jParameters(ctx context.Context, service ParameterService) Neo4jPara } if neo4jParametersCfg, err := service.GetConfigurationParameter(ctx, Neo4jConfigs); err != nil { - log.Warnf("Failed to fetch neo4j configuration; returning default values") + log.Warnf(fmt.Sprintf("Failed to fetch neo4j configuration; returning default values")) } else if err = neo4jParametersCfg.Map(&result); err != nil { - log.Warnf("Invalid neo4j configuration supplied; returning default values") + log.Warnf(fmt.Sprintf("Invalid neo4j configuration supplied; returning default values")) } return result @@ -212,9 +212,9 @@ func GetCitrixRDPSupport(ctx context.Context, service ParameterService) bool { var result CitrixRDPSupport if cfg, err := service.GetConfigurationParameter(ctx, CitrixRDPSupportKey); err != nil { - log.Warnf("Failed to fetch CitrixRDPSupport configuration; returning default 
values") + log.Warnf(fmt.Sprintf("Failed to fetch CitrixRDPSupport configuration; returning default values")) } else if err := cfg.Map(&result); err != nil { - log.Warnf("Invalid CitrixRDPSupport configuration supplied, %v. returning default values.", err) + log.Warnf(fmt.Sprintf("Invalid CitrixRDPSupport configuration supplied, %v. returning default values.", err)) } return result.Enabled @@ -260,9 +260,9 @@ func GetPruneTTLParameters(ctx context.Context, service ParameterService) PruneT } if pruneTTLParametersCfg, err := service.GetConfigurationParameter(ctx, PruneTTL); err != nil { - log.Warnf("Failed to fetch prune TTL configuration; returning default values") + log.Warnf(fmt.Sprintf("Failed to fetch prune TTL configuration; returning default values")) } else if err = pruneTTLParametersCfg.Map(&result); err != nil { - log.Warnf("Invalid prune TTL configuration supplied; returning default values %+v", err) + log.Warnf(fmt.Sprintf("Invalid prune TTL configuration supplied; returning default values %+v", err)) } return result @@ -278,9 +278,9 @@ func GetReconciliationParameter(ctx context.Context, service ParameterService) b result := ReconciliationParameter{Enabled: true} if cfg, err := service.GetConfigurationParameter(ctx, ReconciliationKey); err != nil { - log.Warnf("Failed to fetch reconciliation configuration; returning default values") + log.Warnf(fmt.Sprintf("Failed to fetch reconciliation configuration; returning default values")) } else if err := cfg.Map(&result); err != nil { - log.Warnf("Invalid reconciliation configuration supplied, %v. returning default values.", err) + log.Warnf(fmt.Sprintf("Invalid reconciliation configuration supplied, %v. 
returning default values.", err)) } return result.Enabled diff --git a/cmd/api/src/model/audit.go b/cmd/api/src/model/audit.go index c5f5d68f10..22e9c6efe4 100644 --- a/cmd/api/src/model/audit.go +++ b/cmd/api/src/model/audit.go @@ -228,7 +228,7 @@ func (s AuditEntry) String() string { func NewAuditEntry(action AuditLogAction, status AuditLogEntryStatus, data AuditData) (AuditEntry, error) { if commitId, err := uuid.NewV4(); err != nil { - log.Errorf("Error generating commit ID for audit entry: %s", err.Error()) + log.Errorf(fmt.Sprintf("Error generating commit ID for audit entry: %s", err.Error())) return AuditEntry{}, err } else { return AuditEntry{Action: action, Model: data, Status: status, CommitID: commitId}, nil diff --git a/cmd/api/src/model/samlprovider.go b/cmd/api/src/model/samlprovider.go index a851d140a0..e0455f7487 100644 --- a/cmd/api/src/model/samlprovider.go +++ b/cmd/api/src/model/samlprovider.go @@ -18,6 +18,7 @@ package model import ( "errors" + "fmt" "net/url" "path" @@ -139,7 +140,7 @@ func assertionFindString(assertion *saml.Assertion, names ...string) (string, er return value.Value, nil } } - log.Warnf("[SAML] Found attribute values for attribute %s however none of the values have an XML type of %s. Choosing the first value.", ObjectIDAttributeNameFormat, XMLTypeString) + log.Warnf(fmt.Sprintf("[SAML] Found attribute values for attribute %s however none of the values have an XML type of %s. 
Choosing the first value.", ObjectIDAttributeNameFormat, XMLTypeString)) return attribute.Values[0].Value, nil } } @@ -153,7 +154,7 @@ func (s SAMLProvider) GetSAMLUserPrincipalNameFromAssertion(assertion *saml.Asse for _, attrStmt := range assertion.AttributeStatements { for _, attr := range attrStmt.Attributes { for _, value := range attr.Values { - log.Infof("[SAML] Assertion contains attribute: %s - %s=%v", attr.NameFormat, attr.Name, value) + log.Infof(fmt.Sprintf("[SAML] Assertion contains attribute: %s - %s=%v", attr.NameFormat, attr.Name, value)) } } } diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index 465e9762ca..81d5b4b58b 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -445,13 +445,13 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i ) if bhCtxInst.Timeout > maxTimeout { - log.Debugf("Custom timeout is too large, using the maximum allowable timeout of %d minutes instead", maxTimeout.Minutes()) + log.Debugf(fmt.Sprintf("Custom timeout is too large, using the maximum allowable timeout of %d minutes instead", maxTimeout.Minutes())) bhCtxInst.Timeout = maxTimeout } availableRuntime := bhCtxInst.Timeout if availableRuntime > 0 { - log.Debugf("Available timeout for query is set to: %d seconds", availableRuntime.Seconds()) + log.Debugf(fmt.Sprintf("Available timeout for query is set to: %d seconds", availableRuntime.Seconds())) } else { availableRuntime = defaultTimeout if !s.DisableCypherComplexityLimit { @@ -494,7 +494,7 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i timeoutLog.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) timeoutLog.Msg("Neo4j timed out while executing cypher query") } else { - log.Warnf("RawCypherQuery failed: %v", err) + log.Warnf(fmt.Sprintf("RawCypherQuery failed: %v", err)) } return graphResponse, err } @@ -631,15 +631,15 @@ func (s *GraphQuery) GetEntityCountResults(ctx context.Context, 
node *graph.Node for delegateKey, delegate := range delegates { waitGroup.Add(1) - log.Infof("Running entity query %s", delegateKey) + log.Infof(fmt.Sprintf("Running entity query %s", delegateKey)) go func(delegateKey string, delegate any) { defer waitGroup.Done() if result, err := runEntityQuery(ctx, s.Graph, delegate, node, 0, 0); errors.Is(err, graph.ErrContextTimedOut) { - log.Warnf("Running entity query for key %s: %v", delegateKey, err) + log.Warnf(fmt.Sprintf("Running entity query for key %s: %v", delegateKey, err)) } else if err != nil { - log.Errorf("Error running entity query for key %s: %v", delegateKey, err) + log.Errorf(fmt.Sprintf("Error running entity query for key %s: %v", delegateKey, err)) data.Store(delegateKey, 0) } else { data.Store(delegateKey, result.Len()) @@ -787,11 +787,11 @@ func (s *GraphQuery) cacheQueryResult(queryStart time.Time, cacheKey string, res // Using GuardedSet here even though it isn't necessary because it allows us to collect information on how often // we run these queries in parallel if set, sizeInBytes, err := s.Cache.GuardedSet(cacheKey, result); err != nil { - log.Errorf("[Entity Results Cache] Failed to write results to cache for key: %s", cacheKey) + log.Errorf(fmt.Sprintf("[Entity Results Cache] Failed to write results to cache for key: %s", cacheKey)) } else if !set { - log.Warnf("[Entity Results Cache] Cache entry for query %s not set because it already exists", cacheKey) + log.Warnf(fmt.Sprintf("[Entity Results Cache] Cache entry for query %s not set because it already exists", cacheKey)) } else { - log.Infof("[Entity Results Cache] Cached slow query %s (%d bytes) because it took %dms", cacheKey, sizeInBytes, queryTime) + log.Infof(fmt.Sprintf("[Entity Results Cache] Cached slow query %s (%d bytes) because it took %dms", cacheKey, sizeInBytes, queryTime)) } } } @@ -938,14 +938,14 @@ func fromGraphNodes(nodes graph.NodeSet) []model.PagedNodeListEntry { ) if objectId, err := 
props.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Error getting objectid for %d: %v", node.ID, err) + log.Errorf(fmt.Sprintf("Error getting objectid for %d: %v", node.ID, err)) nodeEntry.ObjectID = "" } else { nodeEntry.ObjectID = objectId } if name, err := props.Get(common.Name.String()).String(); err != nil { - log.Errorf("Error getting name for %d: %v", node.ID, err) + log.Errorf(fmt.Sprintf("Error getting name for %d: %v", node.ID, err)) nodeEntry.Name = "" } else { nodeEntry.Name = name diff --git a/cmd/api/src/services/agi/agi.go b/cmd/api/src/services/agi/agi.go index cbb72cf5bb..d6a1092c7c 100644 --- a/cmd/api/src/services/agi/agi.go +++ b/cmd/api/src/services/agi/agi.go @@ -19,6 +19,7 @@ package agi import ( "context" + "fmt" "slices" "strings" @@ -91,7 +92,7 @@ func RunAssetGroupIsolationCollections(ctx context.Context, db AgiData, graphDB idx := 0 for _, node := range assetGroupNodes { if objectID, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Node %d that does not have valid %s property", node.ID, common.ObjectID) + log.Errorf(fmt.Sprintf("Node %d that does not have valid %s property", node.ID, common.ObjectID)) } else { entries[idx] = model.AssetGroupCollectionEntry{ ObjectID: objectID, diff --git a/cmd/api/src/services/dataquality/dataquality.go b/cmd/api/src/services/dataquality/dataquality.go index 5770ec998e..606e09e500 100644 --- a/cmd/api/src/services/dataquality/dataquality.go +++ b/cmd/api/src/services/dataquality/dataquality.go @@ -36,7 +36,7 @@ type DataQualityData interface { } func SaveDataQuality(ctx context.Context, db DataQualityData, graphDB graph.Database) error { - log.Infof("Started Data Quality Stats Collection") + log.Infof(fmt.Sprintf("Started Data Quality Stats Collection")) defer log.Measure(log.LevelInfo, "Successfully Completed Data Quality Stats Collection")() if stats, aggregation, err := ad.GraphStats(ctx, graphDB); err != nil { diff --git 
a/cmd/api/src/services/entrypoint.go b/cmd/api/src/services/entrypoint.go index 935fe42396..e7f7ac4d51 100644 --- a/cmd/api/src/services/entrypoint.go +++ b/cmd/api/src/services/entrypoint.go @@ -83,7 +83,7 @@ func Entrypoint(ctx context.Context, cfg config.Configuration, connections boots } else if err := connections.Graph.SetDefaultGraph(ctx, schema.DefaultGraph()); err != nil { return nil, fmt.Errorf("no default graph found but migrations are disabled per configuration: %w", err) } else { - log.Infof("Database migrations are disabled per configuration") + log.Infof(fmt.Sprintf("Database migrations are disabled per configuration")) } if apiCache, err := cache.NewCache(cache.Config{MaxSize: cfg.MaxAPICacheSize}); err != nil { @@ -112,7 +112,7 @@ func Entrypoint(ctx context.Context, cfg config.Configuration, connections boots // Trigger analysis on first start if err := connections.RDMS.RequestAnalysis(ctx, "init"); err != nil { - log.Warnf("failed to request init analysis: %v", err) + log.Warnf(fmt.Sprintf("failed to request init analysis: %v", err)) } return []daemons.Daemon{ diff --git a/cmd/api/src/services/fileupload/file_upload.go b/cmd/api/src/services/fileupload/file_upload.go index 6ca239f7cb..3b9e3ff648 100644 --- a/cmd/api/src/services/fileupload/file_upload.go +++ b/cmd/api/src/services/fileupload/file_upload.go @@ -64,17 +64,17 @@ func ProcessStaleFileUploadJobs(ctx context.Context, db FileUploadData) { ) if jobs, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusRunning); err != nil { - log.Errorf("Error getting running jobs: %v", err) + log.Errorf(fmt.Sprintf("Error getting running jobs: %v", err)) } else { for _, job := range jobs { if job.LastIngest.Before(threshold) { - log.Warnf("Ingest timeout: No ingest activity observed for Job ID %d in %f minutes (last ingest was %s). Upload incomplete", + log.Warnf(fmt.Sprintf("Ingest timeout: No ingest activity observed for Job ID %d in %f minutes (last ingest was %s)). 
Upload incomplete", job.ID, now.Sub(threshold).Minutes(), - job.LastIngest.Format(time.RFC3339)) + job.LastIngest.Format(time.RFC3339))) if err := TimeOutUploadJob(ctx, db, job.ID, fmt.Sprintf("Ingest timeout: No ingest activity observed in %f minutes. Upload incomplete.", now.Sub(threshold).Minutes())); err != nil { - log.Errorf("Error marking file upload job %d as timed out: %v", job.ID, err) + log.Errorf(fmt.Sprintf("Error marking file upload job %d as timed out: %v", job.ID, err)) } } } @@ -145,14 +145,14 @@ type FileValidator func(src io.Reader, dst io.Writer) error func WriteAndValidateFile(fileData io.ReadCloser, tempFile *os.File, validationFunc FileValidator) error { if err := validationFunc(fileData, tempFile); err != nil { if err := tempFile.Close(); err != nil { - log.Errorf("Error closing temp file %s with failed validation: %v", tempFile.Name(), err) + log.Errorf(fmt.Sprintf("Error closing temp file %s with failed validation: %v", tempFile.Name(), err)) } else if err := os.Remove(tempFile.Name()); err != nil { - log.Errorf("Error deleting temp file %s: %v", tempFile.Name(), err) + log.Errorf(fmt.Sprintf("Error deleting temp file %s: %v", tempFile.Name(), err)) } return err } else { if err := tempFile.Close(); err != nil { - log.Errorf("Error closing temp file with successful validation %s: %v", tempFile.Name(), err) + log.Errorf(fmt.Sprintf("Error closing temp file with successful validation %s: %v", tempFile.Name(), err)) } return nil } diff --git a/cmd/api/src/services/fileupload/validation.go b/cmd/api/src/services/fileupload/validation.go index 5485daa716..e354accdb8 100644 --- a/cmd/api/src/services/fileupload/validation.go +++ b/cmd/api/src/services/fileupload/validation.go @@ -19,6 +19,7 @@ package fileupload import ( "encoding/json" "errors" + "fmt" "io" "github.com/specterops/bloodhound/log" @@ -72,7 +73,7 @@ func ValidateMetaTag(reader io.Reader, readToEnd bool) (ingest.Metadata, error) case string: if !metaTagFound && depth == 1 && typed == 
"meta" { if err := decoder.Decode(&meta); err != nil { - log.Warnf("Found invalid metatag, skipping") + log.Warnf(fmt.Sprintf("Found invalid metatag, skipping")) } else if meta.Type.IsValid() { metaTagFound = true } diff --git a/cmd/api/src/test/lab/fixtures/api.go b/cmd/api/src/test/lab/fixtures/api.go index 12d74465da..b730553c1b 100644 --- a/cmd/api/src/test/lab/fixtures/api.go +++ b/cmd/api/src/test/lab/fixtures/api.go @@ -88,7 +88,7 @@ func NewCustomApiFixture(cfgFixture *lab.Fixture[config.Configuration]) *lab.Fix }) if err := lab.SetDependency(fixture, cfgFixture); err != nil { - log.Fatalf("BHApiFixture dependency error: %v", err) + log.Fatalf(fmt.Sprintf("BHApiFixture dependency error: %v", err)) } return fixture diff --git a/cmd/api/src/utils/validation/duration_validator.go b/cmd/api/src/utils/validation/duration_validator.go index bfbf32e89a..c15a4d6bc9 100644 --- a/cmd/api/src/utils/validation/duration_validator.go +++ b/cmd/api/src/utils/validation/duration_validator.go @@ -42,7 +42,7 @@ func NewDurationValidator(params map[string]string) Validator { if minD, ok := params["min"]; ok { validator.min = params["min"] if duration, err := iso8601.FromString(minD); err != nil { - log.Warnf("NewDurationValidator invalid min limit provided %s", minD) + log.Warnf(fmt.Sprintf("NewDurationValidator invalid min limit provided %s", minD)) } else { validator.minD = duration.ToDuration() } @@ -51,7 +51,7 @@ func NewDurationValidator(params map[string]string) Validator { if maxD, ok := params["max"]; ok { validator.max = params["max"] if duration, err := iso8601.FromString(maxD); err != nil { - log.Warnf("NewDurationValidator invalid max limit provided %s", maxD) + log.Warnf(fmt.Sprintf("NewDurationValidator invalid max limit provided %s", maxD)) } else { validator.maxD = duration.ToDuration() } diff --git a/packages/go/analysis/ad/ad.go b/packages/go/analysis/ad/ad.go index 48b85cfc0a..340dc36522 100644 --- a/packages/go/analysis/ad/ad.go +++ 
b/packages/go/analysis/ad/ad.go @@ -200,9 +200,9 @@ func grabDomainInformation(tx graph.Transaction) (map[string]string, error) { }).Fetch(func(cursor graph.Cursor[*graph.Node]) error { for node := range cursor.Chan() { if domainObjectID, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Domain node %d does not have a valid object ID", node.ID) + log.Errorf(fmt.Sprintf("Domain node %d does not have a valid object ID", node.ID)) } else if domainName, err := node.Properties.Get(common.Name.String()).String(); err != nil { - log.Errorf("Domain node %d does not have a valid name", node.ID) + log.Errorf(fmt.Sprintf("Domain node %d does not have a valid name", node.ID)) } else { domainNamesByObjectID[domainObjectID] = domainName } @@ -231,9 +231,9 @@ func LinkWellKnownGroups(ctx context.Context, db graph.Database) error { for _, domain := range domains { if domainSid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Errorf("Error getting domain sid for domain %d: %v", domain.ID, err) + log.Errorf(fmt.Sprintf("Error getting domain sid for domain %d: %v", domain.ID, err)) } else if domainName, err := domain.Properties.Get(common.Name.String()).String(); err != nil { - log.Errorf("Error getting domain name for domain %d: %v", domain.ID, err) + log.Errorf(fmt.Sprintf("Error getting domain name for domain %d: %v", domain.ID, err)) } else { var ( domainId = domain.ID @@ -262,7 +262,7 @@ func LinkWellKnownGroups(ctx context.Context, db graph.Database) error { return nil } }); err != nil { - log.Errorf("Error linking well known groups for domain %d: %v", domain.ID, err) + log.Errorf(fmt.Sprintf("Error linking well known groups for domain %d: %v", domain.ID, err)) errors.Add(fmt.Errorf("failed linking well known groups for domain %d: %w", domain.ID, err)) } } @@ -319,7 +319,7 @@ func createOrUpdateWellKnownLink(tx graph.Transaction, startNode *graph.Node, en // See CalculateCrossProductNodeSetsDoc.md for 
explaination of the specialGroups (Authenticated Users and Everyone) and why we treat them the way we do func CalculateCrossProductNodeSets(tx graph.Transaction, domainsid string, groupExpansions impact.PathAggregator, nodeSlices ...[]*graph.Node) cardinality.Duplex[uint64] { if len(nodeSlices) < 2 { - log.Errorf("Cross products require at least 2 nodesets") + log.Errorf(fmt.Sprintf("Cross products require at least 2 nodesets")) return cardinality.NewBitmap64() } @@ -343,7 +343,7 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, domainsid string, group specialGroups, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid) if err != nil { - log.Errorf("Could not fetch groups: %s", err.Error()) + log.Errorf(fmt.Sprintf("Could not fetch groups: %s", err.Error())) } //Unroll all nodesets diff --git a/packages/go/analysis/ad/adcs.go b/packages/go/analysis/ad/adcs.go index ac1f9a76cb..d415a656b1 100644 --- a/packages/go/analysis/ad/adcs.go +++ b/packages/go/analysis/ad/adcs.go @@ -115,99 +115,99 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostGoldenCert(ctx, tx, outC, domain, enterpriseCA); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.GoldenCert.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.GoldenCert.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.GoldenCert.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.GoldenCert.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC1(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post 
processing for %s: %v", ad.ADCSESC1.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC1.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC1.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC1.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC3(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC3.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC3.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC3.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC3.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC4(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC4.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC4.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC4.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC4.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC6a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC6a.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6a.String(), 
err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC6a.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6a.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC6b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC6b.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6b.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC6b.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6b.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC9a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC9a.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9a.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC9a.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9a.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC9b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC9b.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9b.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC9b.String(), err) + 
log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9b.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC10a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC10a.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10a.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC10a.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10a.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC10b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC10b.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10b.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC10b.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10b.String(), err)) } return nil }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC13(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Post processing for %s: %v", ad.ADCSESC13.String(), err) + log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC13.String(), err)) } else if err != nil { - log.Errorf("Failed post processing for %s: %v", ad.ADCSESC13.String(), err) + log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC13.String(), err)) } return nil }) 
diff --git a/packages/go/analysis/ad/adcscache.go b/packages/go/analysis/ad/adcscache.go index 1bf53e6256..b2be8ca8e3 100644 --- a/packages/go/analysis/ad/adcscache.go +++ b/packages/go/analysis/ad/adcscache.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "sync" "github.com/specterops/bloodhound/dawgs/cardinality" @@ -69,15 +70,15 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris for _, ct := range certTemplates { // cert template enrollers if firstDegreePrincipals, err := fetchFirstDegreeNodes(tx, ct, ad.Enroll, ad.GenericAll, ad.AllExtendedRights); err != nil { - log.Errorf("Error fetching enrollers for cert template %d: %v", ct.ID, err) + log.Errorf(fmt.Sprintf("Error fetching enrollers for cert template %d: %v", ct.ID, err)) } else { s.certTemplateEnrollers[ct.ID] = firstDegreePrincipals.Slice() // Check if Auth. Users or Everyone has enroll if domainsid, err := ct.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for certtemplate %d: %v", ct.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for certtemplate %d: %v", ct.ID, err)) } else if authUsersOrEveryoneHasEnroll, err := containsAuthUsersOrEveryone(tx, firstDegreePrincipals.Slice(), domainsid); err != nil { - log.Errorf("Error fetching if auth. users or everyone has enroll on certtemplate %d: %v", ct.ID, err) + log.Errorf(fmt.Sprintf("Error fetching if auth. 
users or everyone has enroll on certtemplate %d: %v", ct.ID, err)) } else { s.certTemplateHasSpecialEnrollers[ct.ID] = authUsersOrEveryoneHasEnroll } @@ -85,7 +86,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris // cert template controllers if firstDegreePrincipals, err := fetchFirstDegreeNodes(tx, ct, ad.Owns, ad.GenericAll, ad.WriteDACL, ad.WriteOwner); err != nil { - log.Errorf("Error fetching controllers for cert template %d: %v", ct.ID, err) + log.Errorf(fmt.Sprintf("Error fetching controllers for cert template %d: %v", ct.ID, err)) } else { s.certTemplateControllers[ct.ID] = firstDegreePrincipals.Slice() } @@ -94,22 +95,22 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris for _, eca := range enterpriseCAs { if firstDegreeEnrollers, err := fetchFirstDegreeNodes(tx, eca, ad.Enroll); err != nil { - log.Errorf("Error fetching enrollers for enterprise ca %d: %v", eca.ID, err) + log.Errorf(fmt.Sprintf("Error fetching enrollers for enterprise ca %d: %v", eca.ID, err)) } else { s.enterpriseCAEnrollers[eca.ID] = firstDegreeEnrollers.Slice() // Check if Auth. Users or Everyone has enroll if domainsid, err := eca.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for eca %d: %v", eca.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for eca %d: %v", eca.ID, err)) } else if authUsersOrEveryoneHasEnroll, err := containsAuthUsersOrEveryone(tx, firstDegreeEnrollers.Slice(), domainsid); err != nil { - log.Errorf("Error fetching if auth. users or everyone has enroll on enterprise ca %d: %v", eca.ID, err) + log.Errorf(fmt.Sprintf("Error fetching if auth. 
users or everyone has enroll on enterprise ca %d: %v", eca.ID, err)) } else { s.enterpriseCAHasSpecialEnrollers[eca.ID] = authUsersOrEveryoneHasEnroll } } if publishedTemplates, err := FetchCertTemplatesPublishedToCA(tx, eca); err != nil { - log.Errorf("Error fetching published cert templates for enterprise ca %d: %v", eca.ID, err) + log.Errorf(fmt.Sprintf("Error fetching published cert templates for enterprise ca %d: %v", eca.ID, err)) } else { s.publishedTemplateCache[eca.ID] = publishedTemplates.Slice() } @@ -117,9 +118,9 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris for _, domain := range domains { if rootCaForNodes, err := FetchEnterpriseCAsRootCAForPathToDomain(tx, domain); err != nil { - log.Errorf("Error getting cas via rootcafor for domain %d: %v", domain.ID, err) + log.Errorf(fmt.Sprintf("Error getting cas via rootcafor for domain %d: %v", domain.ID, err)) } else if authStoreForNodes, err := FetchEnterpriseCAsTrustedForNTAuthToDomain(tx, domain); err != nil { - log.Errorf("Error getting cas via authstorefor for domain %d: %v", domain.ID, err) + log.Errorf(fmt.Sprintf("Error getting cas via authstorefor for domain %d: %v", domain.ID, err)) } else { s.authStoreForChainValid[domain.ID] = graph.NodeSetToDuplex(authStoreForNodes) s.rootCAForChainValid[domain.ID] = graph.NodeSetToDuplex(rootCaForNodes) @@ -127,13 +128,13 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris // Check for weak cert config on DCs if upnMapping, err := hasUPNCertMappingInForest(tx, domain); err != nil { - log.Warnf("Error checking hasUPNCertMappingInForest for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error checking hasUPNCertMappingInForest for domain %d: %v", domain.ID, err)) return nil } else if upnMapping { s.hasUPNCertMappingInForest.Add(domain.ID.Uint64()) } if weakCertBinding, err := hasWeakCertBindingInForest(tx, domain); err != nil { - log.Warnf("Error checking hasWeakCertBindingInForest for 
domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error checking hasWeakCertBindingInForest for domain %d: %v", domain.ID, err)) return nil } else if weakCertBinding { s.hasWeakCertBindingInForest.Add(domain.ID.Uint64()) @@ -143,10 +144,10 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris return nil }) if err != nil { - log.Errorf("Error building adcs cache %v", err) + log.Errorf(fmt.Sprintf("Error building adcs cache %v", err)) } - log.Infof("Finished building adcs cache") + log.Infof(fmt.Sprintf("Finished building adcs cache")) } func (s *ADCSCache) DoesCAChainProperlyToDomain(enterpriseCA, domain *graph.Node) bool { @@ -248,7 +249,7 @@ func hasUPNCertMappingInForest(tx graph.Transaction, domain *graph.Node) (bool, } else { for _, trustedByDomain := range trustedByNodes { if dcForNodes, err := FetchNodesWithDCForEdge(tx, trustedByDomain); err != nil { - log.Warnf("unable to fetch DCFor nodes in hasUPNCertMappingInForest: %v", err) + log.Warnf(fmt.Sprintf("unable to fetch DCFor nodes in hasUPNCertMappingInForest: %v", err)) continue } else { for _, dcForNode := range dcForNodes { @@ -273,7 +274,7 @@ func hasWeakCertBindingInForest(tx graph.Transaction, domain *graph.Node) (bool, } else { for _, trustedByDomain := range trustedByNodes { if dcForNodes, err := FetchNodesWithDCForEdge(tx, trustedByDomain); err != nil { - log.Warnf("unable to fetch DCFor nodes in hasWeakCertBindingInForest: %v", err) + log.Warnf(fmt.Sprintf("unable to fetch DCFor nodes in hasWeakCertBindingInForest: %v", err)) continue } else { for _, dcForNode := range dcForNodes { diff --git a/packages/go/analysis/ad/esc1.go b/packages/go/analysis/ad/esc1.go index 92eb90bed1..82023fc0c3 100644 --- a/packages/go/analysis/ad/esc1.go +++ b/packages/go/analysis/ad/esc1.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "sync" "github.com/specterops/bloodhound/analysis" @@ -40,12 +41,12 @@ func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC 
chan<- analysi ecaEnrollers := cache.GetEnterpriseCAEnrollers(enterpriseCA.ID) for _, certTemplate := range publishedCertTemplates { if valid, err := isCertTemplateValidForEsc1(certTemplate); err != nil { - log.Warnf("Error validating cert template %d: %v", certTemplate.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) continue } else if !valid { continue } else if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error validating cert template %d: %v", certTemplate.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) continue } else { results.Or(CalculateCrossProductNodeSets(tx, domainsid, expandedGroups, cache.GetCertTemplateEnrollers(certTemplate.ID), ecaEnrollers)) @@ -201,7 +202,7 @@ func GetADCSESC1EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", endNode.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { @@ -290,16 +291,16 @@ func getGoldenCertEdgeComposition(tx graph.Transaction, edge *graph.Relationship query.KindIn(query.End(), ad.EnterpriseCA), query.KindIn(query.Relationship(), ad.HostsCAService), ))); err != nil { - log.Errorf("Error getting hostscaservice edge to enterprise ca for computer %d : %v", startNode.ID, err) + log.Errorf(fmt.Sprintf("Error getting hostscaservice edge to enterprise ca for computer %d : %v", startNode.ID, err)) } else { for _, ecaPath := range ecaPaths { eca := ecaPath.Terminal() if chainToRootCAPaths, err := FetchEnterpriseCAsCertChainPathToDomain(tx, eca, 
targetDomainNode); err != nil { - log.Errorf("Error getting eca %d path to domain %d: %v", eca.ID, targetDomainNode.ID, err) + log.Errorf(fmt.Sprintf("Error getting eca %d path to domain %d: %v", eca.ID, targetDomainNode.ID, err)) } else if chainToRootCAPaths.Len() == 0 { continue } else if trustedForAuthPaths, err := FetchEnterpriseCAsTrustedForAuthPathToDomain(tx, eca, targetDomainNode); err != nil { - log.Errorf("Error getting eca %d path to domain %d via trusted for auth: %v", eca.ID, targetDomainNode.ID, err) + log.Errorf(fmt.Sprintf("Error getting eca %d path to domain %d via trusted for auth: %v", eca.ID, targetDomainNode.ID, err)) } else if trustedForAuthPaths.Len() == 0 { continue } else { diff --git a/packages/go/analysis/ad/esc10.go b/packages/go/analysis/ad/esc10.go index d62863dd2a..9c3d4e15ba 100644 --- a/packages/go/analysis/ad/esc10.go +++ b/packages/go/analysis/ad/esc10.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "sync" "github.com/specterops/bloodhound/analysis" @@ -45,21 +46,21 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC10(template, false); err != nil { - log.Warnf("Error validating cert template %d: %v", template.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf("Failed to retrieve enrollers for cert template %d from cache", template.ID) + log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if filteredVictims, err := filterUserDNSResults(tx, 
victimBitmap, template); err != nil { - log.Warnf("Error filtering users from victims for esc9a: %v", err) + log.Warnf(fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) continue } else if attackers, err := FetchAttackersForEscalations9and10(tx, filteredVictims, false); err != nil { - log.Warnf("Error getting start nodes for esc10a attacker nodes: %v", err) + log.Warnf(fmt.Sprintf("Error getting start nodes for esc10a attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) @@ -91,18 +92,18 @@ func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analy for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC10(template, true); err != nil { - log.Warnf("Error validating cert template %d: %v", template.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf("Failed to retrieve enrollers for cert template %d from cache", template.ID) + log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(enterpriseCA.ID)) if attackers, err := FetchAttackersForEscalations9and10(tx, victimBitmap, true); err != nil { - log.Warnf("Error getting start nodes for esc10b attacker nodes: %v", err) + log.Warnf(fmt.Sprintf("Error getting start nodes for esc10b attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) diff --git a/packages/go/analysis/ad/esc13.go b/packages/go/analysis/ad/esc13.go index e1c83fd542..d358533ffc 100644 --- a/packages/go/analysis/ad/esc13.go +++ b/packages/go/analysis/ad/esc13.go @@ -19,6 
+19,7 @@ package ad import ( "context" "errors" + "fmt" "sync" "github.com/specterops/bloodhound/analysis" @@ -35,7 +36,7 @@ import ( func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca, domain *graph.Node, cache ADCSCache) error { if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { return nil @@ -43,19 +44,19 @@ func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analys ecaEnrollers := cache.GetEnterpriseCAEnrollers(eca.ID) for _, template := range publishedCertTemplates { if isValid, err := isCertTemplateValidForESC13(template); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Checking esc13 cert template PostADCSESC13: %v", err) + log.Warnf(fmt.Sprintf("Checking esc13 cert template PostADCSESC13: %v", err)) } else if err != nil { - log.Errorf("Error checking esc13 cert template PostADCSESC13: %v", err) + log.Errorf(fmt.Sprintf("Error checking esc13 cert template PostADCSESC13: %v", err)) } else if !isValid { continue } else if groupNodes, err := getCertTemplateGroupLinks(template, tx); err != nil { - log.Errorf("Error getting cert template group links: %v", err) + log.Errorf(fmt.Sprintf("Error getting cert template group links: %v", err)) } else if len(groupNodes) == 0 { continue } else { controlBitmap := CalculateCrossProductNodeSets(tx, domainsid, groupExpansions, ecaEnrollers, cache.GetCertTemplateEnrollers(template.ID)) if filtered, err := filterUserDNSResults(tx, controlBitmap, template); err != nil { - log.Warnf("Error filtering users from victims for esc13: %v", err) + log.Warnf(fmt.Sprintf("Error filtering 
users from victims for esc13: %v", err)) continue } else { for _, group := range groupNodes.Slice() { @@ -115,7 +116,7 @@ func groupIsContainedOrTrusted(tx graph.Transaction, group, domain *graph.Node) ) if err := ops.Traversal(tx, traversalPlan, pathVisitor); err != nil { - log.Debugf("groupIsContainedOrTrusted traversal error: %v", err) + log.Debugf(fmt.Sprintf("groupIsContainedOrTrusted traversal error: %v", err)) } return matchFound @@ -223,7 +224,7 @@ func GetADCSESC13EdgeComposition(ctx context.Context, db graph.Database, edge *g // Add startnode, Auth. Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", endNode.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc3.go b/packages/go/analysis/ad/esc3.go index 28abe1fcec..d06cce81ad 100644 --- a/packages/go/analysis/ad/esc3.go +++ b/packages/go/analysis/ad/esc3.go @@ -38,7 +38,7 @@ import ( func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca2, domain *graph.Node, cache ADCSCache) error { results := cardinality.NewBitmap64() if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if publishedCertTemplates := cache.GetPublishedTemplateCache(eca2.ID); len(publishedCertTemplates) == 0 { return nil @@ -68,7 +68,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi ) })); err != nil { if 
!graph.IsErrNotFound(err) { - log.Errorf("Error getting target nodes for esc3 for node %d: %v", certTemplateTwo.ID, err) + log.Errorf(fmt.Sprintf("Error getting target nodes for esc3 for node %d: %v", certTemplateTwo.ID, err)) } } else { for _, certTemplateOne := range inboundTemplates { @@ -83,12 +83,12 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi ) if publishedECAs, err := FetchCertTemplateCAs(tx, certTemplateOne); err != nil { - log.Errorf("Error getting cas for cert template %d: %v", certTemplateOne.ID, err) + log.Errorf(fmt.Sprintf("Error getting cas for cert template %d: %v", certTemplateOne.ID, err)) } else if publishedECAs.Len() == 0 { continue } else if eARestrictions { if delegatedAgents, err := fetchFirstDegreeNodes(tx, certTemplateTwo, ad.DelegatedEnrollmentAgent); err != nil { - log.Errorf("Error getting delegated agents for cert template %d: %v", certTemplateTwo.ID, err) + log.Errorf(fmt.Sprintf("Error getting delegated agents for cert template %d: %v", certTemplateTwo.ID, err)) } else { for _, eca1 := range publishedECAs { tempResults := CalculateCrossProductNodeSets(tx, @@ -102,7 +102,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi // Add principals to result set unless it's a user and DNS is required if filteredResults, err := filterUserDNSResults(tx, tempResults, certTemplateOne); err != nil { - log.Errorf("Error filtering user dns results: %v", err) + log.Errorf(fmt.Sprintf("Error filtering user dns results: %v", err)) } else { results.Or(filteredResults) } @@ -119,7 +119,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi ecaEnrollersTwo) if filteredResults, err := filterUserDNSResults(tx, tempResults, certTemplateOne); err != nil { - log.Errorf("Error filtering user dns results: %v", err) + log.Errorf(fmt.Sprintf("Error filtering user dns results: %v", err)) } else { results.Or(filteredResults) } @@ -147,15 +147,15 @@ func 
PostEnrollOnBehalfOf(domains, enterpriseCertAuthorities, certTemplates []*g versionTwoTemplates := make([]*graph.Node, 0) for _, node := range certTemplates { if version, err := node.Properties.Get(ad.SchemaVersion.String()).Float64(); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Did not get schema version for cert template %d: %v", node.ID, err) + log.Warnf(fmt.Sprintf("Did not get schema version for cert template %d: %v", node.ID, err)) } else if err != nil { - log.Errorf("Error getting schema version for cert template %d: %v", node.ID, err) + log.Errorf(fmt.Sprintf("Error getting schema version for cert template %d: %v", node.ID, err)) } else if version == 1 { versionOneTemplates = append(versionOneTemplates, node) } else if version >= 2 { versionTwoTemplates = append(versionTwoTemplates, node) } else { - log.Warnf("Got cert template %d with an invalid version %d", node.ID, version) + log.Warnf(fmt.Sprintf("Got cert template %d with an invalid version %d", node.ID, version)) } } @@ -208,15 +208,15 @@ func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, p results := make([]analysis.CreatePostRelationshipJob, 0) for _, certTemplateOne := range publishedTemplates { if hasBadEku, err := certTemplateHasEku(certTemplateOne, EkuAnyPurpose); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err) + log.Warnf(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if err != nil { - log.Errorf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err) + log.Errorf(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if hasBadEku { continue } else if hasEku, err := certTemplateHasEku(certTemplateOne, EkuCertRequestAgent); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Did not get EffectiveEKUs for cert template %d: %v", 
certTemplateOne.ID, err) + log.Warnf(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if err != nil { - log.Errorf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err) + log.Errorf(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if !hasEku { continue } else { @@ -224,15 +224,15 @@ func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, p if certTemplateOne.ID == certTemplateTwo.ID { continue } else if authorizedSignatures, err := certTemplateTwo.Properties.Get(ad.AuthorizedSignatures.String()).Float64(); err != nil { - log.Errorf("Error getting authorized signatures for cert template %d: %v", certTemplateTwo.ID, err) + log.Errorf(fmt.Sprintf("Error getting authorized signatures for cert template %d: %v", certTemplateTwo.ID, err)) } else if authorizedSignatures < 1 { continue } else if applicationPolicies, err := certTemplateTwo.Properties.Get(ad.ApplicationPolicies.String()).StringSlice(); err != nil { - log.Errorf("Error getting application policies for cert template %d: %v", certTemplateTwo.ID, err) + log.Errorf(fmt.Sprintf("Error getting application policies for cert template %d: %v", certTemplateTwo.ID, err)) } else if !slices.Contains(applicationPolicies, EkuCertRequestAgent) { continue } else if isLinked, err := DoesCertTemplateLinkToDomain(tx, certTemplateTwo, domainNode); err != nil { - log.Errorf("Error fetch paths from cert template %d to domain: %v", certTemplateTwo.ID, err) + log.Errorf(fmt.Sprintf("Error fetch paths from cert template %d to domain: %v", certTemplateTwo.ID, err)) } else if !isLinked { continue } else { @@ -271,15 +271,15 @@ func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates [] for _, certTemplateOne := range publishedTemplates { //prefilter as much as we can first if hasEku, err := certTemplateHasEkuOrAll(certTemplateOne, EkuCertRequestAgent, EkuAnyPurpose); 
errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err) + log.Warnf(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) } else if err != nil { - log.Errorf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err) + log.Errorf(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) } else if !hasEku { continue } else { for _, certTemplateTwo := range versionOneCertTemplates { if hasPath, err := DoesCertTemplateLinkToDomain(tx, certTemplateTwo, domainNode); err != nil { - log.Errorf("Error getting domain node for certtemplate %d: %v", certTemplateTwo.ID, err) + log.Errorf(fmt.Sprintf("Error getting domain node for certtemplate %d: %v", certTemplateTwo.ID, err)) } else if !hasPath { continue } else { @@ -298,16 +298,16 @@ func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates [] func isStartCertTemplateValidESC3(template *graph.Node) bool { if reqManagerApproval, err := template.Properties.Get(ad.RequiresManagerApproval.String()).Bool(); err != nil { - log.Errorf("Error getting reqmanagerapproval for certtemplate %d: %v", template.ID, err) + log.Errorf(fmt.Sprintf("Error getting reqmanagerapproval for certtemplate %d: %v", template.ID, err)) } else if reqManagerApproval { return false } else if schemaVersion, err := template.Properties.Get(ad.SchemaVersion.String()).Float64(); err != nil { - log.Errorf("Error getting schemaversion for certtemplate %d: %v", template.ID, err) + log.Errorf(fmt.Sprintf("Error getting schemaversion for certtemplate %d: %v", template.ID, err)) } else if schemaVersion == 1 { return true } else if schemaVersion > 1 { if authorizedSignatures, err := template.Properties.Get(ad.AuthorizedSignatures.String()).Float64(); err != nil { - log.Errorf("Error getting authorizedsignatures for certtemplate %d: %v", template.ID, err) + log.Errorf(fmt.Sprintf("Error getting 
authorizedsignatures for certtemplate %d: %v", template.ID, err)) } else if authorizedSignatures > 0 { return false } else { @@ -320,18 +320,18 @@ func isStartCertTemplateValidESC3(template *graph.Node) bool { func isEndCertTemplateValidESC3(template *graph.Node) bool { if authEnabled, err := template.Properties.Get(ad.AuthenticationEnabled.String()).Bool(); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Did not getting authenabled for cert template %d: %v", template.ID, err) + log.Warnf(fmt.Sprintf("Did not getting authenabled for cert template %d: %v", template.ID, err)) return false } else if err != nil { - log.Errorf("Error getting authenabled for cert template %d: %v", template.ID, err) + log.Errorf(fmt.Sprintf("Error getting authenabled for cert template %d: %v", template.ID, err)) return false } else if !authEnabled { return false } else if reqManagerApproval, err := template.Properties.Get(ad.RequiresManagerApproval.String()).Bool(); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf("Did not getting reqManagerApproval for cert template %d: %v", template.ID, err) + log.Warnf(fmt.Sprintf("Did not getting reqManagerApproval for cert template %d: %v", template.ID, err)) return false } else if err != nil { - log.Errorf("Error getting reqManagerApproval for cert template %d: %v", template.ID, err) + log.Errorf(fmt.Sprintf("Error getting reqManagerApproval for cert template %d: %v", template.ID, err)) return false } else if reqManagerApproval { return false @@ -433,7 +433,7 @@ func GetADCSESC3EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. 
Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", endNode.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { @@ -618,10 +618,10 @@ func GetADCSESC3EdgeComposition(ctx context.Context, db graph.Database, edge *gr } if collected, err := eca2.Properties.Get(ad.EnrollmentAgentRestrictionsCollected.String()).Bool(); err != nil { - log.Errorf("Error getting enrollmentagentcollected for eca2 %d: %v", eca2.ID, err) + log.Errorf(fmt.Sprintf("Error getting enrollmentagentcollected for eca2 %d: %v", eca2.ID, err)) } else if collected { if hasRestrictions, err := eca2.Properties.Get(ad.HasEnrollmentAgentRestrictions.String()).Bool(); err != nil { - log.Errorf("Error getting hasenrollmentagentrestrictions for ca %d: %v", eca2.ID, err) + log.Errorf(fmt.Sprintf("Error getting hasenrollmentagentrestrictions for ca %d: %v", eca2.ID, err)) } else if hasRestrictions { // Verify p8 path exist diff --git a/packages/go/analysis/ad/esc4.go b/packages/go/analysis/ad/esc4.go index 93fae1f28c..a3657f6491 100644 --- a/packages/go/analysis/ad/esc4.go +++ b/packages/go/analysis/ad/esc4.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "sync" "github.com/specterops/bloodhound/analysis" @@ -38,24 +39,24 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi publishedTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID) domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String() if err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } // 2. 
iterate certtemplates that have an outbound `PublishedTo` edge to eca for _, certTemplate := range publishedTemplates { if principalsWithGenericWrite, err := FetchPrincipalsWithGenericWriteOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf("Error fetching principals with %s on cert template: %v", ad.GenericWrite, err) + log.Warnf(fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.GenericWrite, err)) } else if principalsWithEnrollOrAllExtendedRights, err := FetchPrincipalsWithEnrollOrAllExtendedRightsOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf("Error fetching principals with %s or %s on cert template: %v", ad.Enroll, ad.AllExtendedRights, err) + log.Warnf(fmt.Sprintf("Error fetching principals with %s or %s on cert template: %v", ad.Enroll, ad.AllExtendedRights, err)) } else if principalsWithPKINameFlag, err := FetchPrincipalsWithWritePKINameFlagOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf("Error fetching principals with %s on cert template: %v", ad.WritePKINameFlag, err) + log.Warnf(fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.WritePKINameFlag, err)) } else if principalsWithPKIEnrollmentFlag, err := FetchPrincipalsWithWritePKIEnrollmentFlagOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf("Error fetching principals with %s on cert template: %v", ad.WritePKIEnrollmentFlag, err) + log.Warnf(fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.WritePKIEnrollmentFlag, err)) } else if enrolleeSuppliesSubject, err := certTemplate.Properties.Get(string(ad.EnrolleeSuppliesSubject)).Bool(); err != nil { - log.Warnf("Error fetching %s property on cert template: %v", ad.EnrolleeSuppliesSubject, err) + log.Warnf(fmt.Sprintf("Error fetching %s property on cert template: %v", ad.EnrolleeSuppliesSubject, err)) } else if requiresManagerApproval, err := certTemplate.Properties.Get(string(ad.RequiresManagerApproval)).Bool(); err != nil { - log.Warnf("Error fetching %s 
property on cert template: %v", ad.RequiresManagerApproval, err) + log.Warnf(fmt.Sprintf("Error fetching %s property on cert template: %v", ad.RequiresManagerApproval, err)) } else { var ( @@ -85,7 +86,7 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi // 2c. kick out early if cert template does meet conditions for ESC4 if valid, err := isCertTemplateValidForESC4(certTemplate); err != nil { - log.Warnf("Error validating cert template %d: %v", certTemplate.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) continue } else if !valid { continue @@ -621,7 +622,7 @@ func GetADCSESC4EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", endNode.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc6.go b/packages/go/analysis/ad/esc6.go index eec4116359..eda87dcf9d 100644 --- a/packages/go/analysis/ad/esc6.go +++ b/packages/go/analysis/ad/esc6.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "sync" "github.com/specterops/bloodhound/ein" @@ -36,7 +37,7 @@ import ( func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA, domain *graph.Node, cache ADCSCache) error { if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return 
nil } else if isUserSpecifiesSanEnabled, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabled.String()).Bool(); err != nil { return err @@ -52,7 +53,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys ) for _, publishedCertTemplate := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC6(publishedCertTemplate, false); err != nil { - log.Warnf("Error validating cert template %d: %v", publishedCertTemplate.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", publishedCertTemplate.ID, err)) continue } else if !valid { continue @@ -80,7 +81,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA, domain *graph.Node, cache ADCSCache) error { if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if isUserSpecifiesSanEnabled, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabled.String()).Bool(); err != nil { return err @@ -98,7 +99,7 @@ func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analys ) for _, publishedCertTemplate := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC6(publishedCertTemplate, true); err != nil { - log.Warnf("Error validating cert template %d: %v", publishedCertTemplate.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", publishedCertTemplate.ID, err)) continue } else if !valid { continue @@ -276,7 +277,7 @@ func GetADCSESC6EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. 
Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", endNode.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc9.go b/packages/go/analysis/ad/esc9.go index 2ca4c3218f..144a0aa960 100644 --- a/packages/go/analysis/ad/esc9.go +++ b/packages/go/analysis/ad/esc9.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "sync" "github.com/specterops/bloodhound/analysis" @@ -44,21 +45,21 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys } else { for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC9(template, false); err != nil { - log.Warnf("Error validating cert template %d: %v", template.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf("Failed to retrieve enrollers for cert template %d from cache", template.ID) + log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if filteredVictims, err := filterUserDNSResults(tx, victimBitmap, template); err != nil { - log.Warnf("Error filtering users from victims for esc9a: %v", err) + log.Warnf(fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) continue } else if attackers, err := 
FetchAttackersForEscalations9and10(tx, filteredVictims, false); err != nil { - log.Warnf("Error getting start nodes for esc9a attacker nodes: %v", err) + log.Warnf(fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) @@ -90,18 +91,18 @@ func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analys } else { for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC9(template, true); err != nil { - log.Warnf("Error validating cert template %d: %v", template.ID, err) + log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf("Failed to retrieve enrollers for cert template %d from cache", template.ID) + log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if attackers, err := FetchAttackersForEscalations9and10(tx, victimBitmap, true); err != nil { - log.Warnf("Error getting start nodes for esc9a attacker nodes: %v", err) + log.Warnf(fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) diff --git a/packages/go/analysis/ad/esc_shared.go b/packages/go/analysis/ad/esc_shared.go index 0bc7cb97aa..91956894aa 100644 --- a/packages/go/analysis/ad/esc_shared.go +++ b/packages/go/analysis/ad/esc_shared.go @@ -45,7 +45,7 @@ func PostTrustedForNTAuth(ctx context.Context, db graph.Database, operation anal operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- 
analysis.CreatePostRelationshipJob) error { if thumbprints, err := innerNode.Properties.Get(ad.CertThumbprints.String()).StringSlice(); err != nil { if strings.Contains(err.Error(), graph.ErrPropertyNotFound.Error()) { - log.Warnf("Unable to post-process TrustedForNTAuth edge for NTAuthStore node %d due to missing adcs data: %v", innerNode.ID, err) + log.Warnf(fmt.Sprintf("Unable to post-process TrustedForNTAuth edge for NTAuthStore node %d due to missing adcs data: %v", innerNode.ID, err)) return nil } return err @@ -178,7 +178,7 @@ func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[analysis.Create func PostGoldenCert(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, domain, enterpriseCA *graph.Node) error { if hostCAServiceComputers, err := FetchHostsCAServiceComputers(tx, enterpriseCA); err != nil { - log.Errorf("Error fetching host ca computer for enterprise ca %d: %v", enterpriseCA.ID, err) + log.Errorf(fmt.Sprintf("Error fetching host ca computer for enterprise ca %d: %v", enterpriseCA.ID, err)) } else { for _, computer := range hostCAServiceComputers { channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ diff --git a/packages/go/analysis/ad/membership.go b/packages/go/analysis/ad/membership.go index 181c3457bf..c7370c7132 100644 --- a/packages/go/analysis/ad/membership.go +++ b/packages/go/analysis/ad/membership.go @@ -62,7 +62,7 @@ func ResolveAllGroupMemberships(ctx context.Context, db graph.Database, addition return memberships, err } - log.Infof("Collected %d groups to resolve", len(adGroupIDs)) + log.Infof(fmt.Sprintf("Collected %d groups to resolve", len(adGroupIDs))) for _, adGroupID := range adGroupIDs { if traversalMap.Contains(adGroupID.Uint64()) { diff --git a/packages/go/analysis/ad/post.go b/packages/go/analysis/ad/post.go index 23d4e50c72..b1080cef7d 100644 --- a/packages/go/analysis/ad/post.go +++ b/packages/go/analysis/ad/post.go @@ -185,7 +185,7 @@ func getLAPSSyncers(tx 
graph.Transaction, domain *graph.Node, groupExpansions im ) if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil, err } else if getChangesNodes, err := ops.FetchStartNodes(getChangesQuery); err != nil { return nil, err @@ -205,7 +205,7 @@ func getDCSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions impa ) if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf("Error getting domain SID for domain %d: %v", domain.ID, err) + log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil, err } else if getChangesNodes, err := ops.FetchStartNodes(getChangesQuery); err != nil { return nil, err @@ -252,7 +252,7 @@ func PostLocalGroups(ctx context.Context, db graph.Database, localGroupExpansion computerID := graph.ID(computer) if idx > 0 && idx%10000 == 0 { - log.Infof("Post processed %d active directory computers", idx) + log.Infof(fmt.Sprintf("Post processed %d active directory computers", idx)) } if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -344,7 +344,7 @@ func PostLocalGroups(ctx context.Context, db graph.Database, localGroupExpansion } } - log.Infof("Finished post-processing %d active directory computers", computers.GetCardinality()) + log.Infof(fmt.Sprintf("Finished post-processing %d active directory computers", computers.GetCardinality())) return &operation.Stats, operation.Done() } } @@ -483,7 +483,7 @@ func FetchLocalGroupBitmapForComputer(tx graph.Transaction, computer graph.ID, s } func ExpandAllRDPLocalGroups(ctx context.Context, db graph.Database) (impact.PathAggregator, error) { - log.Infof("Expanding all AD group and local group memberships") + 
log.Infof(fmt.Sprintf("Expanding all AD group and local group memberships")) return ResolveAllGroupMemberships(ctx, db, query.Not( query.Or( diff --git a/packages/go/analysis/ad/queries.go b/packages/go/analysis/ad/queries.go index 6b0c2797d6..71a09d7853 100644 --- a/packages/go/analysis/ad/queries.go +++ b/packages/go/analysis/ad/queries.go @@ -18,6 +18,7 @@ package ad import ( "context" + "fmt" "strings" "time" @@ -1513,7 +1514,7 @@ func FetchUserSessionCompleteness(tx graph.Transaction, domainSIDs ...string) (f func FetchAllGroupMembers(ctx context.Context, db graph.Database, targets graph.NodeSet) (graph.NodeSet, error) { defer log.Measure(log.LevelInfo, "FetchAllGroupMembers")() - log.Infof("Fetching group members for %d AD nodes", len(targets)) + log.Infof(fmt.Sprintf("Fetching group members for %d AD nodes", len(targets))) allGroupMembers := graph.NewNodeSet() @@ -1527,7 +1528,7 @@ func FetchAllGroupMembers(ctx context.Context, db graph.Database, targets graph. } } - log.Infof("Collected %d group members", len(allGroupMembers)) + log.Infof(fmt.Sprintf("Collected %d group members", len(allGroupMembers))) return allGroupMembers, nil } diff --git a/packages/go/analysis/azure/application.go b/packages/go/analysis/azure/application.go index 407826ac0a..d51da4ba1c 100644 --- a/packages/go/analysis/azure/application.go +++ b/packages/go/analysis/azure/application.go @@ -18,6 +18,7 @@ package azure import ( "context" + "fmt" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/azure" @@ -60,12 +61,12 @@ func getAppServicePrincipalID(tx graph.Transaction, node *graph.Node) (string, e return "", err } else if appServicePrincipals.Len() == 0 { // Don't want this to break the function, but we'll want to know about it - log.Errorf("Application node %d has no service principals attached", node.ID) + log.Errorf(fmt.Sprintf("Application node %d has no service principals attached", node.ID)) } else { servicePrincipal := 
appServicePrincipals.Pick() if servicePrincipalID, err = servicePrincipal.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", servicePrincipal.ID, node.ID, err) + log.Errorf(fmt.Sprintf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", servicePrincipal.ID, node.ID, err)) } } return servicePrincipalID, nil diff --git a/packages/go/analysis/azure/filters.go b/packages/go/analysis/azure/filters.go index 82d73a870c..558dd5fb14 100644 --- a/packages/go/analysis/azure/filters.go +++ b/packages/go/analysis/azure/filters.go @@ -17,6 +17,8 @@ package azure import ( + "fmt" + "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" @@ -94,7 +96,7 @@ func roleDescentFilter(ctx *ops.TraversalContext, segment *graph.PathSegment) bo // If the group does not allow role inheritance then we do not inherit the terminal role if isRoleAssignable, err := end.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil || !isRoleAssignable { if graph.IsErrPropertyNotFound(err) { - log.Warnf("Node %d is missing property %s", end.ID, azure.IsAssignableToRole) + log.Warnf(fmt.Sprintf("Node %d is missing property %s", end.ID, azure.IsAssignableToRole)) } acceptDescendent = false return false diff --git a/packages/go/analysis/azure/post.go b/packages/go/analysis/azure/post.go index 5a2a5ce766..d1fab25042 100644 --- a/packages/go/analysis/azure/post.go +++ b/packages/go/analysis/azure/post.go @@ -240,7 +240,7 @@ func AppRoleAssignments(ctx context.Context, db graph.Database) (*analysis.Atomi return nil }); err != nil { if err := operation.Done(); err != nil { - log.Errorf("Error caught during azure AppRoleAssignments teardown: %v", err) + log.Errorf(fmt.Sprintf("Error caught during azure AppRoleAssignments 
teardown: %v", err)) } return &operation.Stats, err @@ -660,7 +660,7 @@ func addSecret(operation analysis.StatTrackedOperation[analysis.CreatePostRelati } else { for _, role := range addSecretRoles { for _, target := range tenantAppsAndSPs { - log.Debugf("Adding AZAddSecret edge from role %s to %s %d", role.ID.String(), target.Kinds.Strings(), target.ID) + log.Debugf(fmt.Sprintf("Adding AZAddSecret edge from role %s to %s %d", role.ID.String(), target.Kinds.Strings(), target.ID)) nextJob := analysis.CreatePostRelationshipJob{ FromID: role.ID, ToID: target.ID, @@ -723,7 +723,7 @@ func ExecuteCommand(ctx context.Context, db graph.Database) (*analysis.AtomicPos return nil }); err != nil { if err := operation.Done(); err != nil { - log.Errorf("Error caught during azure ExecuteCommand teardown: %v", err) + log.Errorf(fmt.Sprintf("Error caught during azure ExecuteCommand teardown: %v", err)) } return &operation.Stats, err @@ -804,7 +804,7 @@ func globalAdmins(roleAssignments RoleAssignments, tenant *graph.Node, operation return nil }); err != nil { - log.Errorf("Failed to submit azure global admins post processing job: %v", err) + log.Errorf(fmt.Sprintf("Failed to submit azure global admins post processing job: %v", err)) } } @@ -822,7 +822,7 @@ func privilegedRoleAdmins(roleAssignments RoleAssignments, tenant *graph.Node, o return nil }); err != nil { - log.Errorf("Failed to submit privileged role admins post processing job: %v", err) + log.Errorf(fmt.Sprintf("Failed to submit privileged role admins post processing job: %v", err)) } } @@ -840,7 +840,7 @@ func privilegedAuthAdmins(roleAssignments RoleAssignments, tenant *graph.Node, o return nil }); err != nil { - log.Errorf("Failed to submit azure privileged auth admins post processing job: %v", err) + log.Errorf(fmt.Sprintf("Failed to submit azure privileged auth admins post processing job: %v", err)) } } @@ -864,13 +864,13 @@ func addMembers(roleAssignments RoleAssignments, operation analysis.StatTrackedO return nil 
}); err != nil { - log.Errorf("Failed to submit azure add members AddMemberAllGroupsTargetRoles post processing job: %v", err) + log.Errorf(fmt.Sprintf("Failed to submit azure add members AddMemberAllGroupsTargetRoles post processing job: %v", err)) } if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if isRoleAssignable, err := innerGroup.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil { if graph.IsErrPropertyNotFound(err) { - log.Warnf("Node %d is missing property %s", innerGroup.ID, azure.IsAssignableToRole) + log.Warnf(fmt.Sprintf("Node %d is missing property %s", innerGroup.ID, azure.IsAssignableToRole)) } else { return err } @@ -888,7 +888,7 @@ func addMembers(roleAssignments RoleAssignments, operation analysis.StatTrackedO return nil }); err != nil { - log.Errorf("Failed to submit azure add members AddMemberGroupNotRoleAssignableTargetRoles post processing job: %v", err) + log.Errorf(fmt.Sprintf("Failed to submit azure add members AddMemberGroupNotRoleAssignableTargetRoles post processing job: %v", err)) } } } @@ -902,14 +902,14 @@ func UserRoleAssignments(ctx context.Context, db graph.Database) (*analysis.Atom for _, tenant := range tenantNodes { if roleAssignments, err := TenantRoleAssignments(ctx, db, tenant); err != nil { if err := operation.Done(); err != nil { - log.Errorf("Error caught during azure UserRoleAssignments.TenantRoleAssignments teardown: %v", err) + log.Errorf(fmt.Sprintf("Error caught during azure UserRoleAssignments.TenantRoleAssignments teardown: %v", err)) } return &analysis.AtomicPostProcessingStats{}, err } else { if err := resetPassword(operation, tenant, roleAssignments); err != nil { if err := operation.Done(); err != nil { - log.Errorf("Error caught during azure UserRoleAssignments.resetPassword teardown: %v", err) + log.Errorf(fmt.Sprintf("Error caught during azure UserRoleAssignments.resetPassword teardown: %v", 
err)) } return &analysis.AtomicPostProcessingStats{}, err diff --git a/packages/go/analysis/azure/queries.go b/packages/go/analysis/azure/queries.go index f53ea45120..461c0ca996 100644 --- a/packages/go/analysis/azure/queries.go +++ b/packages/go/analysis/azure/queries.go @@ -18,6 +18,7 @@ package azure import ( "context" + "fmt" "strings" "github.com/RoaringBitmap/roaring/roaring64" @@ -57,7 +58,7 @@ func FetchGraphDBTierZeroTaggedAssets(tx graph.Transaction, tenant *graph.Node) defer log.LogAndMeasure(log.LevelInfo, "Tenant %d FetchGraphDBTierZeroTaggedAssets", tenant.ID)() if tenantObjectID, err := tenant.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err) + log.Errorf(fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) return nil, err } else { if nodeSet, err := ops.FetchNodeSet(tx.Nodes().Filterf(func() graph.Criteria { diff --git a/packages/go/analysis/azure/service_principal.go b/packages/go/analysis/azure/service_principal.go index dc7a84cabb..7a7ede7ddd 100644 --- a/packages/go/analysis/azure/service_principal.go +++ b/packages/go/analysis/azure/service_principal.go @@ -18,6 +18,7 @@ package azure import ( "context" + "fmt" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/azure" @@ -58,12 +59,12 @@ func getServicePrincipalAppID(tx graph.Transaction, node *graph.Node) (string, e return appID, err } else if servicePrincipalApps.Len() == 0 { // Don't want this to break the function, but we'll want to know about it - log.Warnf("Service principal node %d has no applications attached", node.ID) + log.Warnf(fmt.Sprintf("Service principal node %d has no applications attached", node.ID)) } else { app := servicePrincipalApps.Pick() if appID, err = app.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf("Failed to marshal the object ID of 
node %d while fetching the service principal ID of application node %d: %v", app.ID, node.ID, err) + log.Errorf(fmt.Sprintf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", app.ID, node.ID, err)) } } return appID, nil diff --git a/packages/go/analysis/hybrid/hybrid.go b/packages/go/analysis/hybrid/hybrid.go index 382abe3c2c..0d31ca1ee9 100644 --- a/packages/go/analysis/hybrid/hybrid.go +++ b/packages/go/analysis/hybrid/hybrid.go @@ -179,7 +179,7 @@ func createMissingADUser(ctx context.Context, db graph.Database, objectID string newNode *graph.Node ) - log.Debugf("Matching AD User node with objectID %s not found, creating a new one", objectID) + log.Debugf(fmt.Sprintf("Matching AD User node with objectID %s not found, creating a new one", objectID)) properties := graph.AsProperties(map[string]any{ common.ObjectID.String(): objectID, }) diff --git a/packages/go/analysis/impact/aggregator.go b/packages/go/analysis/impact/aggregator.go index b28d50009c..f19267f111 100644 --- a/packages/go/analysis/impact/aggregator.go +++ b/packages/go/analysis/impact/aggregator.go @@ -17,6 +17,8 @@ package impact import ( + "fmt" + "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" @@ -158,7 +160,7 @@ func (s Aggregator) resolve(targetID uint64) cardinality.Provider[uint64] { } func (s Aggregator) Cardinality(targets ...uint64) cardinality.Provider[uint64] { - log.Debugf("Calculating pathMembers cardinality for %d targets", len(targets)) + log.Debugf(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) defer log.Measure(log.LevelDebug, "Calculated pathMembers cardinality for %d targets", len(targets))() impact := s.newCardinalityProvider() diff --git a/packages/go/analysis/impact/id_aggregator.go b/packages/go/analysis/impact/id_aggregator.go index 5ff00666de..ba069c6e3b 100644 --- 
a/packages/go/analysis/impact/id_aggregator.go +++ b/packages/go/analysis/impact/id_aggregator.go @@ -17,6 +17,7 @@ package impact import ( + "fmt" "sync" "github.com/specterops/bloodhound/dawgs/cardinality" @@ -211,7 +212,7 @@ func (s IDA) resolve(targetID uint64) cardinality.Provider[uint64] { } func (s IDA) Cardinality(targets ...uint64) cardinality.Provider[uint64] { - log.Debugf("Calculating pathMembers cardinality for %d targets", len(targets)) + log.Debugf(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) defer log.Measure(log.LevelDebug, "Calculated pathMembers cardinality for %d targets", len(targets))() impact := s.newCardinalityProvider() diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index 16f0a8de79..446e12b8fd 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -18,6 +18,7 @@ package analysis import ( "context" + "fmt" "sort" "github.com/specterops/bloodhound/dawgs/graph" @@ -92,19 +93,19 @@ func (s PostProcessingStats) LogStats() { return } - log.Debugf("Relationships deleted before post-processing:") + log.Debugf(fmt.Sprintf("Relationships deleted before post-processing:")) for _, relationship := range statsSortedKeys(s.RelationshipsDeleted) { if numDeleted := s.RelationshipsDeleted[relationship]; numDeleted > 0 { - log.Debugf(" %s %d", relationship.String(), numDeleted) + log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) } } - log.Debugf("Relationships created after post-processing:") + log.Debugf(fmt.Sprintf("Relationships created after post-processing:")) for _, relationship := range statsSortedKeys(s.RelationshipsCreated) { if numDeleted := s.RelationshipsCreated[relationship]; numDeleted > 0 { - log.Debugf(" %s %d", relationship.String(), s.RelationshipsCreated[relationship]) + log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), s.RelationshipsCreated[relationship])) } } } diff --git a/packages/go/analysis/post_operation.go 
b/packages/go/analysis/post_operation.go index c5850c80e4..1f4fd464f8 100644 --- a/packages/go/analysis/post_operation.go +++ b/packages/go/analysis/post_operation.go @@ -18,6 +18,7 @@ package analysis import ( "context" + "fmt" "sync" "sync/atomic" "time" @@ -133,19 +134,19 @@ func (s *AtomicPostProcessingStats) LogStats() { return } - log.Debugf("Relationships deleted before post-processing:") + log.Debugf(fmt.Sprintf("Relationships deleted before post-processing:")) for _, relationship := range atomicStatsSortedKeys(s.RelationshipsDeleted) { if numDeleted := int(*s.RelationshipsDeleted[relationship]); numDeleted > 0 { - log.Debugf(" %s %d", relationship.String(), numDeleted) + log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) } } - log.Debugf("Relationships created after post-processing:") + log.Debugf(fmt.Sprintf("Relationships created after post-processing:")) for _, relationship := range atomicStatsSortedKeys(s.RelationshipsCreated) { if numCreated := int(*s.RelationshipsCreated[relationship]); numCreated > 0 { - log.Debugf(" %s %d", relationship.String(), numCreated) + log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), numCreated)) } } } diff --git a/packages/go/cache/cache_benchmark_test.go b/packages/go/cache/cache_benchmark_test.go index 4885fe6500..31e07b4cb7 100644 --- a/packages/go/cache/cache_benchmark_test.go +++ b/packages/go/cache/cache_benchmark_test.go @@ -44,7 +44,7 @@ func getObjectIDs(num int) []string { func setupLRUCache() cache.Cache { if c, err := cache.NewCache(cache.Config{MaxSize: numSimulatedOUs}); err != nil { - log.Fatalf("Error creating cache: %v", err) + log.Fatalf(fmt.Sprintf("Error creating cache: %v", err)) } else { return c } diff --git a/packages/go/conftool/main.go b/packages/go/conftool/main.go index d37c084d84..f602c96721 100644 --- a/packages/go/conftool/main.go +++ b/packages/go/conftool/main.go @@ -19,6 +19,7 @@ package main import ( "encoding/json" "flag" + "fmt" "log" "os" "time" @@ -38,22 
+39,22 @@ func main() { flag.Parse() if configfile, err := os.Create(path); err != nil { - log.Fatalf("Could not create config file %s: %v", path, err) + log.Fatalf(fmt.Sprintf("Could not create config file %s: %v", path, err)) } else { defer configfile.Close() if !skipArgon2 { - log.Printf("Tuning Argon2 parameters to target %d milliseconds. This might take some time.", tuneMillis) + log.Printf(fmt.Sprintf("Tuning Argon2 parameters to target %d milliseconds. This might take some time.", tuneMillis)) } if argon2Config, err := config.GenerateArgonSettings(time.Duration(tuneMillis), skipArgon2); err != nil { - log.Fatalf("Could not generate argon2 settings: %v", err) + log.Fatalf(fmt.Sprintf("Could not generate argon2 settings: %v", err)) } else if bytes, err := json.Marshal(argon2Config); err != nil { - log.Fatalf("Coule not marshal argon2 settings: %v", err) + log.Fatalf(fmt.Sprintf("Coule not marshal argon2 settings: %v", err)) } else if _, err := configfile.Write(bytes); err != nil { - log.Fatalf("Could not write to config file %s: %v", path, err) + log.Fatalf(fmt.Sprintf("Could not write to config file %s: %v", path, err)) } else { - log.Printf("Successfully wrote to config file to %s", path) + log.Printf(fmt.Sprintf("Successfully wrote to config file to %s", path)) } } } diff --git a/packages/go/cypher/models/pgsql/translate/expression.go b/packages/go/cypher/models/pgsql/translate/expression.go index 92b04fadd2..b62c54443d 100644 --- a/packages/go/cypher/models/pgsql/translate/expression.go +++ b/packages/go/cypher/models/pgsql/translate/expression.go @@ -213,7 +213,7 @@ func InferExpressionType(expression pgsql.Expression) (pgsql.DataType, error) { // Infer type information for well known column names switch typedExpression[1] { -// TODO: Graph ID should be int2 + // TODO: Graph ID should be int2 case pgsql.ColumnGraphID, pgsql.ColumnID, pgsql.ColumnStartID, pgsql.ColumnEndID: return pgsql.Int8, nil @@ -256,7 +256,7 @@ func InferExpressionType(expression 
pgsql.Expression) (pgsql.DataType, error) { return InferExpressionType(typedExpression.Expression) default: - log.Infof("unable to infer type hint for expression type: %T", expression) + log.Infof(fmt.Sprintf("unable to infer type hint for expression type: %T", expression)) return pgsql.UnknownDataType, nil } } diff --git a/packages/go/dawgs/drivers/neo4j/cypher.go b/packages/go/dawgs/drivers/neo4j/cypher.go index aa30307ed0..836e11f3ce 100644 --- a/packages/go/dawgs/drivers/neo4j/cypher.go +++ b/packages/go/dawgs/drivers/neo4j/cypher.go @@ -18,6 +18,7 @@ package neo4j import ( "bytes" + "fmt" "sort" "strings" @@ -308,9 +309,9 @@ func stripCypherQuery(rawQuery string) string { ) if queryModel, err := frontend.ParseCypher(frontend.DefaultCypherContext(), rawQuery); err != nil { - log.Errorf("Error occurred parsing cypher query during sanitization: %v", err) + log.Errorf(fmt.Sprintf("Error occurred parsing cypher query during sanitization: %v", err)) } else if err = strippedEmitter.Write(queryModel, buffer); err != nil { - log.Errorf("Error occurred sanitizing cypher query: %v", err) + log.Errorf(fmt.Sprintf("Error occurred sanitizing cypher query: %v", err)) } return buffer.String() diff --git a/packages/go/dawgs/drivers/neo4j/index.go b/packages/go/dawgs/drivers/neo4j/index.go index d3c11831aa..6588034c81 100644 --- a/packages/go/dawgs/drivers/neo4j/index.go +++ b/packages/go/dawgs/drivers/neo4j/index.go @@ -122,7 +122,7 @@ func indexTypeProvider(indexType graph.IndexType) string { func assertIndexes(ctx context.Context, db graph.Database, indexesToRemove []string, indexesToAdd map[string]neo4jIndex) error { if err := db.WriteTransaction(ctx, func(tx graph.Transaction) error { for _, indexToRemove := range indexesToRemove { - log.Infof("Removing index %s", indexToRemove) + log.Infof(fmt.Sprintf("Removing index %s", indexToRemove)) result := tx.Raw(strings.Replace(dropPropertyIndexStatement, "$name", indexToRemove, 1), nil) result.Close() @@ -139,7 +139,7 @@ func 
assertIndexes(ctx context.Context, db graph.Database, indexesToRemove []str return db.WriteTransaction(ctx, func(tx graph.Transaction) error { for indexName, indexToAdd := range indexesToAdd { - log.Infof("Adding index %s to labels %s on properties %s using %s", indexName, indexToAdd.kind.String(), indexToAdd.Field, indexTypeProvider(indexToAdd.Type)) + log.Infof(fmt.Sprintf("Adding index %s to labels %s on properties %s using %s", indexName, indexToAdd.kind.String(), indexToAdd.Field, indexTypeProvider(indexToAdd.Type))) if err := db.Run(ctx, createPropertyIndexStatement, map[string]interface{}{ "name": indexName, diff --git a/packages/go/dawgs/drivers/neo4j/transaction.go b/packages/go/dawgs/drivers/neo4j/transaction.go index 421f2a6d79..83d863eed3 100644 --- a/packages/go/dawgs/drivers/neo4j/transaction.go +++ b/packages/go/dawgs/drivers/neo4j/transaction.go @@ -330,7 +330,7 @@ func (s *neo4jTransaction) Raw(stmt string, params map[string]any) graph.Result prettyParameters.WriteString(":") if marshalledValue, err := json.Marshal(value); err != nil { - log.Errorf("Unable to marshal query parameter %s", key) + log.Errorf(fmt.Sprintf("Unable to marshal query parameter %s", key)) } else { prettyParameters.Write(marshalledValue) } diff --git a/packages/go/dawgs/drivers/pg/batch.go b/packages/go/dawgs/drivers/pg/batch.go index dfa3fb6872..3d6f3bdcde 100644 --- a/packages/go/dawgs/drivers/pg/batch.go +++ b/packages/go/dawgs/drivers/pg/batch.go @@ -502,7 +502,7 @@ func (s *batch) flushRelationshipCreateBuffer() error { } else if graphTarget, err := s.innerTransaction.getTargetGraph(); err != nil { return err } else if _, err := s.innerTransaction.tx.Exec(s.ctx, createEdgeBatchStatement, graphTarget.ID, createBatch.startIDs, createBatch.endIDs, createBatch.edgeKindIDs, createBatch.edgePropertyBags); err != nil { - log.Infof("Num merged property bags: %d - Num edge keys: %d - StartID batch size: %d", len(batchBuilder.edgePropertiesIndex), len(batchBuilder.keyToEdgeID), 
len(batchBuilder.relationshipUpdateBatch.startIDs)) + log.Infof(fmt.Sprintf("Num merged property bags: %d - Num edge keys: %d - StartID batch size: %d", len(batchBuilder.edgePropertiesIndex), len(batchBuilder.keyToEdgeID), len(batchBuilder.relationshipUpdateBatch.startIDs))) return err } diff --git a/packages/go/dawgs/drivers/pg/pg.go b/packages/go/dawgs/drivers/pg/pg.go index 6e22ad64c9..c70a293cbb 100644 --- a/packages/go/dawgs/drivers/pg/pg.go +++ b/packages/go/dawgs/drivers/pg/pg.go @@ -59,7 +59,7 @@ func afterPooledConnectionRelease(conn *pgx.Conn) bool { if _, hasType := conn.TypeMap().TypeForName(dataType.String()); !hasType { // This connection should be destroyed since it does not contain information regarding the schema's // composite types - log.Warnf("Unable to find expected data type: %s. This database connection will not be pooled.", dataType) + log.Warnf(fmt.Sprintf("Unable to find expected data type: %s. This database connection will not be pooled.", dataType)) return false } } diff --git a/packages/go/dawgs/drivers/pg/tooling.go b/packages/go/dawgs/drivers/pg/tooling.go index 082e461b46..aa4728982e 100644 --- a/packages/go/dawgs/drivers/pg/tooling.go +++ b/packages/go/dawgs/drivers/pg/tooling.go @@ -17,6 +17,7 @@ package pg import ( + "fmt" "regexp" "sync" @@ -53,7 +54,7 @@ type queryHook struct { func (s *queryHook) Execute(query string, arguments ...any) { switch s.action { case actionTrace: - log.Infof("Here") + log.Infof(fmt.Sprintf("Here")) } } diff --git a/packages/go/dawgs/traversal/traversal.go b/packages/go/dawgs/traversal/traversal.go index c3245db5e4..fc0eeca30c 100644 --- a/packages/go/dawgs/traversal/traversal.go +++ b/packages/go/dawgs/traversal/traversal.go @@ -123,12 +123,12 @@ func (s *pattern) Do(delegate PatternMatchDelegate) Driver { func (s *pattern) OutboundWithDepth(min, max int, criteria ...graph.Criteria) PatternContinuation { if min < 0 { min = 1 - log.Warnf("Negative mindepth not allowed. 
Setting min depth for expansion to 1") + log.Warnf(fmt.Sprintf("Negative mindepth not allowed. Setting min depth for expansion to 1")) } if max < 0 { max = 0 - log.Warnf("Negative maxdepth not allowed. Setting max depth for expansion to 0") + log.Warnf(fmt.Sprintf("Negative maxdepth not allowed. Setting max depth for expansion to 0")) } s.expansions = append(s.expansions, expansion{ @@ -151,12 +151,12 @@ func (s *pattern) Outbound(criteria ...graph.Criteria) PatternContinuation { func (s *pattern) InboundWithDepth(min, max int, criteria ...graph.Criteria) PatternContinuation { if min < 0 { min = 1 - log.Warnf("Negative mindepth not allowed. Setting min depth for expansion to 1") + log.Warnf(fmt.Sprintf("Negative mindepth not allowed. Setting min depth for expansion to 1")) } if max < 0 { max = 0 - log.Warnf("Negative maxdepth not allowed. Setting max depth for expansion to 0") + log.Warnf(fmt.Sprintf("Negative maxdepth not allowed. Setting max depth for expansion to 0")) } s.expansions = append(s.expansions, expansion{ @@ -527,21 +527,21 @@ func FilteredSkipLimit(filter SkipLimitFilter, visitorFilter SegmentVisitor, ski if skip == 0 || shouldCollect() { // If we should collect this result, check to see if we're already at a limit for the number of results if limit > 0 && atLimit() { - log.Debugf("At collection limit, rejecting path: %s", graph.FormatPathSegment(next)) + log.Debugf(fmt.Sprintf("At collection limit, rejecting path: %s", graph.FormatPathSegment(next))) return false } - log.Debugf("Collected path: %s", graph.FormatPathSegment(next)) + log.Debugf(fmt.Sprintf("Collected path: %s", graph.FormatPathSegment(next))) visitorFilter(next) } else { - log.Debugf("Skipping path visit: %s", graph.FormatPathSegment(next)) + log.Debugf(fmt.Sprintf("Skipping path visit: %s", graph.FormatPathSegment(next))) } } if shouldDescend { - log.Debugf("Descending into path: %s", graph.FormatPathSegment(next)) + log.Debugf(fmt.Sprintf("Descending into path: %s", 
graph.FormatPathSegment(next))) } else { - log.Debugf("Rejecting further descent into path: %s", graph.FormatPathSegment(next)) + log.Debugf(fmt.Sprintf("Rejecting further descent into path: %s", graph.FormatPathSegment(next))) } return shouldDescend diff --git a/packages/go/ein/ad.go b/packages/go/ein/ad.go index b39c188221..c2a50f9347 100644 --- a/packages/go/ein/ad.go +++ b/packages/go/ein/ad.go @@ -17,6 +17,7 @@ package ein import ( + "fmt" "strconv" "strings" @@ -77,7 +78,7 @@ func stringToBool(itemProps map[string]any, keyName string) { case bool: //pass default: - log.Debugf("Removing %s with type %T", converted) + log.Debugf(fmt.Sprintf("Removing %s with type %T", converted)) delete(itemProps, keyName) } } @@ -95,7 +96,7 @@ func stringToInt(itemProps map[string]any, keyName string) { case int: //pass default: - log.Debugf("Removing %s with type %T", keyName, converted) + log.Debugf(fmt.Sprintf("Removing %s with type %T", keyName, converted)) delete(itemProps, keyName) } } @@ -194,10 +195,10 @@ func ParseACEData(aces []ACE, targetID string, targetType graph.Kind) []Ingestib } if rightKind, err := analysis.ParseKind(ace.RightName); err != nil { - log.Errorf("Error during ParseACEData: %v", err) + log.Errorf(fmt.Sprintf("Error during ParseACEData: %v", err)) continue } else if !ad.IsACLKind(rightKind) { - log.Errorf("Non-ace edge type given to process aces: %s", ace.RightName) + log.Errorf(fmt.Sprintf("Non-ace edge type given to process aces: %s", ace.RightName)) continue } else { converted = append(converted, NewIngestibleRelationship( @@ -225,7 +226,7 @@ func convertSPNData(spns []SPNTarget, sourceID string) []IngestibleRelationship for _, s := range spns { if kind, err := analysis.ParseKind(s.Service); err != nil { - log.Errorf("Error during processSPNTargets: %v", err) + log.Errorf(fmt.Sprintf("Error during processSPNTargets: %v", err)) } else { converted = append(converted, NewIngestibleRelationship( IngestibleSource{ @@ -367,7 +368,7 @@ func 
ParseDomainTrusts(domain Domain) ParsedDomainTrustData { switch converted := trust.TrustAttributes.(type) { case string: if i, err := strconv.Atoi(converted); err != nil { - log.Errorf("Error converting trust attributes %s to int", converted) + log.Errorf(fmt.Sprintf("Error converting trust attributes %s to int", converted)) finalTrustAttributes = 0 } else { finalTrustAttributes = i @@ -375,7 +376,7 @@ func ParseDomainTrusts(domain Domain) ParsedDomainTrustData { case int: finalTrustAttributes = converted default: - log.Errorf("Error converting trust attributes %s to int", converted) + log.Errorf(fmt.Sprintf("Error converting trust attributes %s to int", converted)) finalTrustAttributes = 0 } diff --git a/packages/go/ein/azure.go b/packages/go/ein/azure.go index 5f9d7915ac..754d966471 100644 --- a/packages/go/ein/azure.go +++ b/packages/go/ein/azure.go @@ -453,11 +453,11 @@ func ConvertAzureGroupMembersToRels(data models.GroupMembers) []IngestibleRelati member azure2.DirectoryObject ) if err := json.Unmarshal(raw.Member, &member); err != nil { - log.Errorf(SerialError, "azure group member", err) + log.Errorf(fmt.Sprintf(SerialError, "azure group member", err)) } else if memberType, err := ExtractTypeFromDirectoryObject(member); errors.Is(err, ErrInvalidType) { - log.Warnf(ExtractError, err) + log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(ExtractError, err) + log.Errorf(fmt.Sprintf(ExtractError, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ @@ -487,11 +487,11 @@ func ConvertAzureGroupOwnerToRels(data models.GroupOwners) []IngestibleRelations owner azure2.DirectoryObject ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(SerialError, "azure group owner", err) + log.Errorf(fmt.Sprintf(SerialError, "azure group owner", err)) } else if ownerType, err := ExtractTypeFromDirectoryObject(owner); errors.Is(err, ErrInvalidType) { - log.Warnf(ExtractError, err) + 
log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(ExtractError, err) + log.Errorf(fmt.Sprintf(ExtractError, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ @@ -865,7 +865,7 @@ func ConvertAzureRoleAssignmentToRels(roleAssignment azure2.UnifiedRoleAssignmen if CanAddSecret(roleAssignment.RoleDefinitionId) && roleAssignment.DirectoryScopeId != "/" { if relType, err := GetAddSecretRoleKind(roleAssignment.RoleDefinitionId); err != nil { - log.Errorf("Error processing role assignment for role %s: %v", roleObjectId, err) + log.Errorf(fmt.Sprintf("Error processing role assignment for role %s: %v", roleObjectId, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ @@ -1073,11 +1073,11 @@ func ConvertAzureServicePrincipalOwnerToRels(data models.ServicePrincipalOwners) ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(SerialError, "azure service principal owner", err) + log.Errorf(fmt.Sprintf(SerialError, "azure service principal owner", err)) } else if ownerType, err := ExtractTypeFromDirectoryObject(owner); errors.Is(err, ErrInvalidType) { - log.Warnf(ExtractError, err) + log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(ExtractError, err) + log.Errorf(fmt.Sprintf(ExtractError, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ diff --git a/packages/go/log/cmd/logtest/main.go b/packages/go/log/cmd/logtest/main.go index 932213debd..fd5f84cfcc 100644 --- a/packages/go/log/cmd/logtest/main.go +++ b/packages/go/log/cmd/logtest/main.go @@ -17,13 +17,15 @@ package main import ( + "fmt" + "github.com/specterops/bloodhound/log" ) func main() { - log.Infof("This is an info log message: %s", "test") - log.Warnf("This is a warning log message: %s", "test") - log.Errorf("This is a error log message: %s", "test") - log.Fatalf("This is a fatal log message and will kill 
the application with exit 1: %s", "test") - log.Errorf("This should never be seen, the Fatalf call is broken!") + log.Infof(fmt.Sprintf("This is an info log message: %s", "test")) + log.Warnf(fmt.Sprintf("This is a warning log message: %s", "test")) + log.Errorf(fmt.Sprintf("This is a error log message: %s", "test")) + log.Fatalf(fmt.Sprintf("This is a fatal log message and will kill the application with exit 1: %s", "test")) + log.Errorf(fmt.Sprintf("This should never be seen, the Fatalf call is broken!")) } diff --git a/packages/go/schemagen/generator/cue.go b/packages/go/schemagen/generator/cue.go index 27d63251df..db4a51b627 100644 --- a/packages/go/schemagen/generator/cue.go +++ b/packages/go/schemagen/generator/cue.go @@ -96,7 +96,7 @@ func (s *ConfigBuilder) OverlayPath(rootPath string) error { } else { overlayPath := filepath.Join(s.overlayRootPath, strings.TrimPrefix(path, rootPath)) - log.Debugf("Overlaying file: %s to %s", path, overlayPath) + log.Debugf(fmt.Sprintf("Overlaying file: %s to %s", path, overlayPath)) s.overlay[overlayPath] = load.FromBytes(content) } diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go index 1786b1f42c..761b5c0260 100644 --- a/packages/go/schemagen/main.go +++ b/packages/go/schemagen/main.go @@ -17,6 +17,7 @@ package main import ( + "fmt" "os" "path/filepath" @@ -73,31 +74,31 @@ func main() { cfgBuilder := generator.NewConfigBuilder("/schemas") if projectRoot, err := generator.FindGolangWorkspaceRoot(); err != nil { - log.Fatalf("Error finding project root: %v", err) + log.Fatalf(fmt.Sprintf("Error finding project root: %v", err)) } else { - log.Infof("Project root is %s", projectRoot) + log.Infof(fmt.Sprintf("Project root is %s", projectRoot)) if err := cfgBuilder.OverlayPath(filepath.Join(projectRoot, "packages/cue")); err != nil { - log.Fatalf("Error: %v", err) + log.Fatalf(fmt.Sprintf("Error: %v", err)) } cfg := cfgBuilder.Build() if bhInstance, err := cfg.Value("/schemas/bh/bh.cue"); err != nil { 
- log.Fatalf("Error: %v", errors.Details(err, nil)) + log.Fatalf(fmt.Sprintf("Error: %v", errors.Details(err, nil))) } else { var bhModels Schema if err := bhInstance.Decode(&bhModels); err != nil { - log.Fatalf("Error: %v", errors.Details(err, nil)) + log.Fatalf(fmt.Sprintf("Error: %v", errors.Details(err, nil))) } if err := GenerateGolang(projectRoot, bhModels); err != nil { - log.Fatalf("Error %v", err) + log.Fatalf(fmt.Sprintf("Error %v", err)) } if err := GenerateSharedTypeScript(projectRoot, bhModels); err != nil { - log.Fatalf("Error %v", err) + log.Fatalf(fmt.Sprintf("Error %v", err)) } } } diff --git a/packages/go/stbernard/analyzers/js/js.go b/packages/go/stbernard/analyzers/js/js.go index 00a7a7246b..ff565cdd08 100644 --- a/packages/go/stbernard/analyzers/js/js.go +++ b/packages/go/stbernard/analyzers/js/js.go @@ -51,7 +51,7 @@ func Run(jsPaths []string, env environment.Environment) ([]codeclimate.Entry, er result = make([]codeclimate.Entry, 0, len(jsPaths)) ) - log.Infof("Running eslint") + log.Infof(fmt.Sprintf("Running eslint")) for _, path := range jsPaths { entries, err := runEslint(path, env) @@ -63,7 +63,7 @@ func Run(jsPaths []string, env environment.Environment) ([]codeclimate.Entry, er result = append(result, entries...) 
} - log.Infof("Completed eslint") + log.Infof(fmt.Sprintf("Completed eslint")) return result, exitError } diff --git a/packages/go/stbernard/cmdrunner/cmdrunner.go b/packages/go/stbernard/cmdrunner/cmdrunner.go index cb239ef116..975c6a8336 100644 --- a/packages/go/stbernard/cmdrunner/cmdrunner.go +++ b/packages/go/stbernard/cmdrunner/cmdrunner.go @@ -64,7 +64,7 @@ func Run(command string, args []string, path string, env environment.Environment } } - log.Infof("Running %s for %s", cmdstr, path) + log.Infof(fmt.Sprintf("Running %s for %s", cmdstr, path)) err := cmd.Run() if _, ok := err.(*exec.ExitError); ok { @@ -73,7 +73,7 @@ func Run(command string, args []string, path string, env environment.Environment return fmt.Errorf("%s: %w", cmdstr, err) } - log.Infof("Finished %s for %s", cmdstr, path) + log.Infof(fmt.Sprintf("Finished %s for %s", cmdstr, path)) return exitErr } diff --git a/packages/go/stbernard/command/builder/builder.go b/packages/go/stbernard/command/builder/builder.go index 8b86eeb63a..6be014451b 100644 --- a/packages/go/stbernard/command/builder/builder.go +++ b/packages/go/stbernard/command/builder/builder.go @@ -128,7 +128,7 @@ func clearFiles(path string, entry os.DirEntry, err error) error { return nil } - log.Debugf("Removing %s", filepath.Join(path, entry.Name())) + log.Debugf(fmt.Sprintf("Removing %s", filepath.Join(path, entry.Name()))) if entry.IsDir() { if err := os.RemoveAll(filepath.Join(path, entry.Name())); err != nil { diff --git a/packages/go/stbernard/command/tester/tester.go b/packages/go/stbernard/command/tester/tester.go index 6773735c2e..0f3d3d553e 100644 --- a/packages/go/stbernard/command/tester/tester.go +++ b/packages/go/stbernard/command/tester/tester.go @@ -106,7 +106,7 @@ func (s *command) runTests(cwd string, coverPath string, modPaths []string) erro } if !s.yarnOnly { - log.Infof("Checking coverage directory") + log.Infof(fmt.Sprintf("Checking coverage directory")) if err := os.MkdirAll(coverPath, 
os.ModeDir+fs.ModePerm); err != nil { return fmt.Errorf("making coverage directory: %w", err) } else if dirList, err := os.ReadDir(coverPath); err != nil { @@ -114,7 +114,7 @@ func (s *command) runTests(cwd string, coverPath string, modPaths []string) erro } else { for _, entry := range dirList { if filepath.Ext(entry.Name()) == golang.CoverageExt { - log.Debugf("Removing %s", filepath.Join(coverPath, entry.Name())) + log.Debugf(fmt.Sprintf("Removing %s", filepath.Join(coverPath, entry.Name()))) if err := os.Remove(filepath.Join(coverPath, entry.Name())); err != nil { return fmt.Errorf("removing %s: %w", filepath.Join(coverPath, entry.Name()), err) } diff --git a/packages/go/stbernard/environment/environment.go b/packages/go/stbernard/environment/environment.go index 0e0df21a6a..d77ba753d5 100644 --- a/packages/go/stbernard/environment/environment.go +++ b/packages/go/stbernard/environment/environment.go @@ -17,6 +17,7 @@ package environment import ( + "fmt" "os" "strings" @@ -55,7 +56,7 @@ func (s Environment) SetIfEmpty(key string, value string) { // Overrides an environment variable with a new value func (s Environment) Override(key string, value string) { - log.Infof("Overriding environment variable %s with %s", key, value) + log.Infof(fmt.Sprintf("Overriding environment variable %s with %s", key, value)) s[key] = value } diff --git a/packages/go/stbernard/git/git.go b/packages/go/stbernard/git/git.go index 51de8b8f0e..bf5bd6348b 100644 --- a/packages/go/stbernard/git/git.go +++ b/packages/go/stbernard/git/git.go @@ -80,7 +80,7 @@ func CheckClean(cwd string, env environment.Environment) (bool, error) { cmd.Stderr = os.Stderr } - log.Infof("Checking repository clean for %s", cwd) + log.Infof(fmt.Sprintf("Checking repository clean for %s", cwd)) // We need to run git status first to ensure we don't hit a cache issue if err := cmdrunner.Run("git", []string{"status"}, cwd, env, func(c *exec.Cmd) { c.Stdout = nil }); err != nil { @@ -93,7 +93,7 @@ func 
CheckClean(cwd string, env environment.Environment) (bool, error) { } } - log.Infof("Finished checking repository clean for %s", cwd) + log.Infof(fmt.Sprintf("Finished checking repository clean for %s", cwd)) return true, nil } @@ -171,13 +171,13 @@ func getAllVersionTags(cwd string, env environment.Environment) ([]string, error cmd.Stderr = os.Stderr } - log.Infof("Listing tags for %v", cwd) + log.Infof(fmt.Sprintf("Listing tags for %v", cwd)) if err := cmd.Run(); err != nil { return nil, fmt.Errorf("git tag --list v*: %w", err) } - log.Infof("Finished listing tags for %v", cwd) + log.Infof(fmt.Sprintf("Finished listing tags for %v", cwd)) return strings.Split(output.String(), "\n"), nil } diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index b5712e9411..df688d13fd 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -20,6 +20,7 @@ package main import ( "errors" + "fmt" "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/command" @@ -37,21 +38,21 @@ func main() { } if lvl, err := log.ParseLevel(rawLvl); err != nil { - log.Errorf("Could not parse log level from %s: %v", environment.LogLevelVarName, err) + log.Errorf(fmt.Sprintf("Could not parse log level from %s: %v", environment.LogLevelVarName, err)) } else { log.SetGlobalLevel(lvl) } if cmd, err := command.ParseCLI(env); errors.Is(err, command.ErrNoCmd) { - log.Fatalf("No valid command specified") + log.Fatalf(fmt.Sprintf("No valid command specified")) } else if errors.Is(err, command.ErrHelpRequested) { // No need to exit 1 if help was requested return } else if err != nil { - log.Fatalf("Error while parsing command: %v", err) + log.Fatalf(fmt.Sprintf("Error while parsing command: %v", err)) } else if err := cmd.Run(); err != nil { - log.Fatalf("Failed to run command `%s`: %v", cmd.Name(), err) + log.Fatalf(fmt.Sprintf("Failed to run command `%s`: %v", cmd.Name(), err)) } else { - log.Infof("Command `%s` completed 
successfully", cmd.Name()) + log.Infof(fmt.Sprintf("Command `%s` completed successfully", cmd.Name())) } } diff --git a/packages/go/stbernard/workspace/golang/build.go b/packages/go/stbernard/workspace/golang/build.go index 64b93b1dc9..6e7fb09f53 100644 --- a/packages/go/stbernard/workspace/golang/build.go +++ b/packages/go/stbernard/workspace/golang/build.go @@ -42,7 +42,7 @@ func BuildMainPackages(workRoot string, modPaths []string, env environment.Envir ) if version, err = git.ParseLatestVersionFromTags(workRoot, env); err != nil { - log.Warnf("Failed to parse version from git tags, falling back to environment variable: %v", err) + log.Warnf(fmt.Sprintf("Failed to parse version from git tags, falling back to environment variable: %v", err)) parsedVersion, err := semver.NewVersion(env[environment.VersionVarName]) if err != nil { return fmt.Errorf("error parsing version from environment variable: %w", err) @@ -50,7 +50,7 @@ func BuildMainPackages(workRoot string, modPaths []string, env environment.Envir version = *parsedVersion } - log.Infof("Building for version %s", version.Original()) + log.Infof(fmt.Sprintf("Building for version %s", version.Original())) for _, modPath := range modPaths { wg.Add(1) @@ -104,7 +104,7 @@ func buildModuleMainPackages(buildDir string, modPath string, version semver.Ver mu.Unlock() } - log.Infof("Built package %s", p.Import) + log.Infof(fmt.Sprintf("Built package %s", p.Import)) }(pkg) } } diff --git a/packages/go/stbernard/workspace/yarn/yarn.go b/packages/go/stbernard/workspace/yarn/yarn.go index e140c574f3..c6c5efd5f5 100644 --- a/packages/go/stbernard/workspace/yarn/yarn.go +++ b/packages/go/stbernard/workspace/yarn/yarn.go @@ -164,7 +164,7 @@ func relWorkspaceToAbsWorkspace(cwd string, relWorkspace Workspace) Workspace { func getCoverage(coverFile string) (coverage, error) { var cov coverage if b, err := os.ReadFile(coverFile); err != nil { - log.Warnf("Could not find coverage for %s, skipping", coverFile) + 
log.Warnf(fmt.Sprintf("Could not find coverage for %s, skipping", coverFile)) return cov, nil } else if err := json.Unmarshal(b, &cov); err != nil { return cov, fmt.Errorf("unmarshal coverage file %s: %w", coverFile, err) From 6e51f9847698dd6314fc5d41fed32e16aade4f29 Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:12:24 -0500 Subject: [PATCH 03/20] BED-4153: Improve context hook --- cmd/api/src/api/middleware/logging.go | 13 ++----------- cmd/api/src/api/middleware/middleware.go | 1 + cmd/api/src/cmd/bhapi/main.go | 3 ++- cmd/api/src/ctx/ctx.go | 1 + packages/go/log/handlers/handlers.go | 13 +++++++++++-- 5 files changed, 17 insertions(+), 14 deletions(-) diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index 00c80a5553..e60ce8692f 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -119,7 +119,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http return func(next http.Handler) http.Handler { return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) { var ( - logAttrs = []slog.Attr{} + logAttrs []slog.Attr requestContext = ctx.FromRequest(request) deadline time.Time @@ -146,7 +146,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http // Defer the log statement and then serve the request defer func() { - slog.LogAttrs(nil, slog.LevelInfo, fmt.Sprintf("%s %s", request.Method, request.URL.RequestURI()), logAttrs...) + slog.LogAttrs(request.Context(), slog.LevelInfo, fmt.Sprintf("%s %s", request.Method, request.URL.RequestURI()), logAttrs...) 
if !deadline.IsZero() && time.Now().After(deadline) { log.Warnf( @@ -158,23 +158,14 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http next.ServeHTTP(loggedResponse, request) - // Perform auth introspection to log the client/user identity for each call - if requestContext.AuthCtx.Authenticated() { - if identity, err := idResolver.GetIdentity(requestContext.AuthCtx); err == nil { - logAttrs = append(logAttrs, slog.String(identity.Key, identity.ID.String())) - } - } - // Log the token ID and request date if the request contains either header setSignedRequestFields(request, logAttrs) // Add the fields that we care about before exiting logAttrs = append(logAttrs, - slog.String("remote_addr", request.RemoteAddr), slog.String("proto", request.Proto), slog.String("referer", request.Referer()), slog.String("user_agent", request.UserAgent()), - slog.String("request_id", ctx.RequestID(request)), slog.Int64("request_bytes", loggedRequestBody.bytesRead), slog.Int64("response_bytes", loggedResponse.bytesWritten), slog.Int("status", loggedResponse.statusCode), diff --git a/cmd/api/src/api/middleware/middleware.go b/cmd/api/src/api/middleware/middleware.go index acf0ca63a3..a60c7f38de 100644 --- a/cmd/api/src/api/middleware/middleware.go +++ b/cmd/api/src/api/middleware/middleware.go @@ -142,6 +142,7 @@ func ContextMiddleware(next http.Handler) http.Handler { }, RequestedURL: model.AuditableURL(request.URL.String()), RequestIP: parseUserIP(request), + RemoteAddr: request.RemoteAddr, }) // Route the request with the embedded context diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index 615d077e02..8a04ef5cd8 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -26,6 +26,7 @@ import ( "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/handlers" + "github.com/specterops/bloodhound/src/auth" 
"github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" @@ -59,7 +60,7 @@ func main() { printVersion() } - logger := slog.New(&handlers.ContextHandler{Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr})}) + logger := slog.New(&handlers.ContextHandler{IdResolver: auth.NewIdentityResolver(), Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr})}) slog.SetDefault(logger) // Initialize basic logging facilities while we start up diff --git a/cmd/api/src/ctx/ctx.go b/cmd/api/src/ctx/ctx.go index 33e273cb36..8e15b74971 100644 --- a/cmd/api/src/ctx/ctx.go +++ b/cmd/api/src/ctx/ctx.go @@ -42,6 +42,7 @@ type Context struct { Host *url.URL RequestedURL model.AuditableURL RequestIP string + RemoteAddr string } func (s *Context) ConstructGoContext() context.Context { diff --git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go index 1db68b4f28..e98ef5fc70 100644 --- a/packages/go/log/handlers/handlers.go +++ b/packages/go/log/handlers/handlers.go @@ -4,10 +4,13 @@ import ( "context" "log/slog" + "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" ) type ContextHandler struct { + IdResolver auth.IdentityResolver + slog.Handler } @@ -21,8 +24,14 @@ func (h ContextHandler) Handle(c context.Context, r slog.Record) error { r.Add(slog.String("request_ip", bhCtx.RequestIP)) } - if !bhCtx.AuthCtx.Session.UserID.IsNil() { - r.Add("user_id", bhCtx.AuthCtx.Session.UserID) + if bhCtx.RemoteAddr != "" { + r.Add(slog.String("remote_addr", bhCtx.RemoteAddr)) + } + + if bhCtx.AuthCtx.Authenticated() { + if identity, err := h.IdResolver.GetIdentity(bhCtx.AuthCtx); err == nil { + r.Add(slog.String(identity.Key, identity.ID.String())) + } } } From 3ae944b8a75f6ebfd13612ac4bf01584bef21b6c Mon Sep 17 00:00:00 2001 From: Wes 
<169498386+wes-mil@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:42:16 -0500 Subject: [PATCH 04/20] BED-4153: Fix IDResolver name --- cmd/api/src/cmd/bhapi/main.go | 2 +- packages/go/log/handlers/handlers.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index 8a04ef5cd8..942db0095a 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -60,7 +60,7 @@ func main() { printVersion() } - logger := slog.New(&handlers.ContextHandler{IdResolver: auth.NewIdentityResolver(), Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr})}) + logger := slog.New(&handlers.ContextHandler{IDResolver: auth.NewIdentityResolver(), Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr})}) slog.SetDefault(logger) // Initialize basic logging facilities while we start up diff --git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go index e98ef5fc70..e42904ab9a 100644 --- a/packages/go/log/handlers/handlers.go +++ b/packages/go/log/handlers/handlers.go @@ -9,7 +9,7 @@ import ( ) type ContextHandler struct { - IdResolver auth.IdentityResolver + IDResolver auth.IdentityResolver slog.Handler } @@ -29,7 +29,7 @@ func (h ContextHandler) Handle(c context.Context, r slog.Record) error { } if bhCtx.AuthCtx.Authenticated() { - if identity, err := h.IdResolver.GetIdentity(bhCtx.AuthCtx); err == nil { + if identity, err := h.IDResolver.GetIdentity(bhCtx.AuthCtx); err == nil { r.Add(slog.String(identity.Key, identity.ID.String())) } } From 697b3ee00ec98b032b5cbe496cd6d7976650493f Mon Sep 17 00:00:00 2001 From: Alyx Holms Date: Wed, 8 Jan 2025 16:05:08 -0700 Subject: [PATCH 05/20] chore: refactor out slog measure package --- cmd/api/src/api/middleware/logging.go | 2 +- .../api/middleware/logging_internal_test.go | 28 ------- cmd/api/src/daemons/datapipe/jobs.go | 2 +- 
packages/go/log/log.go | 27 ------- packages/go/log/measure/measure.go | 74 +++++++++++++++++++ 5 files changed, 76 insertions(+), 57 deletions(-) create mode 100644 packages/go/log/measure/measure.go diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index e60ce8692f..10996a3d1e 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -136,7 +136,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http // assign a deadline, but only if a valid timeout has been supplied via the prefer header timeout, err := RequestWaitDuration(request) if err != nil { - log.Errorf(fmt.Sprintf("Error parsing prefer header for timeout: %w", err)) + log.Errorf(fmt.Sprintf("Error parsing prefer header for timeout: %v", err)) } else if err == nil && timeout > 0 { deadline = time.Now().Add(timeout * time.Second) } diff --git a/cmd/api/src/api/middleware/logging_internal_test.go b/cmd/api/src/api/middleware/logging_internal_test.go index 1eeb65771e..79ca36aff4 100644 --- a/cmd/api/src/api/middleware/logging_internal_test.go +++ b/cmd/api/src/api/middleware/logging_internal_test.go @@ -22,10 +22,8 @@ import ( "time" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log/mocks" "github.com/specterops/bloodhound/src/test/must" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" ) func Test_signedRequestDate(t *testing.T) { @@ -43,29 +41,3 @@ func Test_signedRequestDate(t *testing.T) { require.True(t, hasHeader) require.Equal(t, expectedTime.Format(time.RFC3339Nano), requestDate) } - -func Test_fetchSignedRequestFields(t *testing.T) { - var ( - mockCtrl = gomock.NewController(t) - mockLogEvent = mocks.NewMockEvent(mockCtrl) - - expectedTime = time.Now() - expectedID = must.NewUUIDv4() - request = must.NewHTTPRequest(http.MethodGet, "http://example.com/", nil) - ) - - request.Header.Set(headers.Authorization.String(), "bhesignature 
"+expectedID.String()) - request.Header.Set(headers.RequestDate.String(), expectedTime.Format(time.RFC3339Nano)) - - mockLogEvent.EXPECT().Str("signed_request_date", expectedTime.Format(time.RFC3339Nano)).Times(1) - mockLogEvent.EXPECT().Str("token_id", expectedID.String()).Times(1) - - setSignedRequestFields(request, mockLogEvent) - - // Remove auth header since it is non-fatal if it is missing - request.Header.Del(headers.Authorization.String()) - - mockLogEvent.EXPECT().Str("signed_request_date", expectedTime.Format(time.RFC3339Nano)).Times(1) - - setSignedRequestFields(request, mockLogEvent) -} diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index bb1f7fb86d..097da91d77 100644 --- a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -221,7 +221,7 @@ func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType mo if err := file.Close(); err != nil { log.Errorf(fmt.Sprintf("Error closing ingest file %s: %v", filePath, err)) } else if err := os.Remove(filePath); errors.Is(err, fs.ErrNotExist) { - log.Warnf(fmt.Sprintf("Removing ingest file %s: %w", filePath, err)) + log.Warnf(fmt.Sprintf("Removing ingest file %s: %v", filePath, err)) } else if err != nil { log.Errorf(fmt.Sprintf("Error removing ingest file %s: %v", filePath, err)) } diff --git a/packages/go/log/log.go b/packages/go/log/log.go index 7a548bce8a..fb334d5de0 100644 --- a/packages/go/log/log.go +++ b/packages/go/log/log.go @@ -18,7 +18,6 @@ package log import ( "fmt" - "log/slog" "os" "strings" "sync/atomic" @@ -252,29 +251,3 @@ func LogAndMeasure(level Level, format string, args ...any) func() { } } } - -func SlogMeasure(level slog.Level, format string, args ...any) func() { - then := time.Now() - - return func() { - if elapsed := time.Since(then); elapsed >= measureThreshold { - slog.Log(nil, level, fmt.Sprintf(format, args...), FieldElapsed, elapsed) - } - } -} - -func SlogLogAndMeasure(level slog.Level, format 
string, args ...any) func() { - var ( - pairID = logMeasurePairCounter.Add(1) - message = fmt.Sprintf(format, args...) - then = time.Now() - ) - - slog.Log(nil, level, message, FieldMeasurementID, pairID) - - return func() { - if elapsed := time.Since(then); elapsed >= measureThreshold { - slog.Log(nil, level, message, FieldMeasurementID, pairID, FieldElapsed, elapsed) - } - } -} diff --git a/packages/go/log/measure/measure.go b/packages/go/log/measure/measure.go new file mode 100644 index 0000000000..ce730fcb18 --- /dev/null +++ b/packages/go/log/measure/measure.go @@ -0,0 +1,74 @@ +package measure + +import ( + "context" + "log/slog" + "sync/atomic" + "time" +) + +const ( + FieldElapsed = "elapsed" + FieldMeasurementID = "measurement_id" +) + +var ( + logMeasurePairCounter = atomic.Uint64{} + measureThreshold = time.Second +) + +func ContextMeasure(ctx context.Context, level slog.Level, msg string, args ...any) func() { + then := time.Now() + + return func() { + if elapsed := time.Since(then); elapsed >= measureThreshold { + args = append(args, FieldElapsed, elapsed) + slog.Log(ctx, level, msg, args...) + } + } +} + +func Measure(level slog.Level, msg string, args ...any) func() { + then := time.Now() + + return func() { + if elapsed := time.Since(then); elapsed >= measureThreshold { + args = append(args, FieldElapsed, elapsed) + slog.Log(context.TODO(), level, msg, args...) + } + } +} + +func ContextLogAndMeasure(ctx context.Context, level slog.Level, msg string, args ...any) func() { + var ( + pairID = logMeasurePairCounter.Add(1) + then = time.Now() + ) + + args = append(args, FieldMeasurementID, pairID) + slog.Log(ctx, level, msg, args...) + + return func() { + if elapsed := time.Since(then); elapsed >= measureThreshold { + args = append(args, FieldElapsed, elapsed) + slog.Log(ctx, level, msg, args...) 
+ } + } +} + +func LogAndMeasure(level slog.Level, msg string, args ...any) func() { + var ( + pairID = logMeasurePairCounter.Add(1) + then = time.Now() + ) + + args = append(args, FieldMeasurementID, pairID) + slog.Log(context.TODO(), level, msg, args...) + + return func() { + if elapsed := time.Since(then); elapsed >= measureThreshold { + args = append(args, FieldElapsed, elapsed) + slog.Log(context.TODO(), level, msg, args...) + } + } +} From f31aa25ecd3b55925b9971914ab3dc733b3eb092 Mon Sep 17 00:00:00 2001 From: Alyx Holms Date: Wed, 8 Jan 2025 16:27:14 -0700 Subject: [PATCH 06/20] chore: reference implementation for supporting runtime level manipulation --- cmd/api/src/cmd/bhapi/main.go | 3 +-- packages/go/log/handlers/handlers.go | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index 942db0095a..c74b3fe9a5 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -26,7 +26,6 @@ import ( "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/handlers" - "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" @@ -60,7 +59,7 @@ func main() { printVersion() } - logger := slog.New(&handlers.ContextHandler{IDResolver: auth.NewIdentityResolver(), Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr})}) + logger := handlers.NewDefaultLogger() slog.SetDefault(logger) // Initialize basic logging facilities while we start up diff --git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go index e42904ab9a..d44a860cbc 100644 --- a/packages/go/log/handlers/handlers.go +++ b/packages/go/log/handlers/handlers.go @@ -3,11 +3,14 @@ package handlers import ( "context" "log/slog" + "os" 
"github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" ) +var lvl = new(slog.LevelVar) + type ContextHandler struct { IDResolver auth.IdentityResolver @@ -45,3 +48,15 @@ func ReplaceAttr(_ []string, a slog.Attr) slog.Attr { return a } + +func NewDefaultLogger() *slog.Logger { + return slog.New(&ContextHandler{IDResolver: auth.NewIdentityResolver(), Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: lvl, ReplaceAttr: ReplaceAttr})}) +} + +func SetGlobalLevel(level slog.Level) { + lvl.Set(level) +} + +func GlobalLevel() slog.Level { + return lvl.Level() +} From eecf9cb2ac2beca33f56a81c565ee825d1be4dfa Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Wed, 8 Jan 2025 19:05:06 -0500 Subject: [PATCH 07/20] BED-4153: Migrate to new LogAndMeasure --- cmd/api/src/api/tools/pg.go | 8 +++++--- cmd/api/src/daemons/datapipe/datapipe.go | 4 +++- cmd/api/src/migrations/manifest.go | 6 ++++-- packages/go/analysis/ad/queries.go | 4 +++- packages/go/analysis/azure/queries.go | 6 ++++-- packages/go/analysis/azure/role.go | 5 +++-- 6 files changed, 22 insertions(+), 11 deletions(-) diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index 0b79b18a76..ba69a70c74 100644 --- a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -19,6 +19,8 @@ package tools import ( "context" "fmt" + "github.com/specterops/bloodhound/log/measure" + "log/slog" "net/http" "sync" @@ -42,7 +44,7 @@ const ( ) func migrateTypes(ctx context.Context, neoDB, pgDB graph.Database) error { - defer log.LogAndMeasure(log.LevelInfo, "Migrating kinds from Neo4j to PostgreSQL")() + defer measure.ContextLogAndMeasure(ctx, slog.LevelInfo, "Migrating kinds from Neo4j to PostgreSQL")() var ( neoNodeKinds graph.Kinds @@ -110,7 +112,7 @@ func convertNeo4jProperties(properties *graph.Properties) error { } func migrateNodes(ctx context.Context, neoDB, pgDB graph.Database) (map[graph.ID]graph.ID, error) { - defer 
log.LogAndMeasure(log.LevelInfo, "Migrating nodes from Neo4j to PostgreSQL")() + defer measure.ContextLogAndMeasure(ctx, slog.LevelInfo, "Migrating nodes from Neo4j to PostgreSQL")() var ( // Start at 2 and assume that the first node of the graph is the graph schema migration information @@ -149,7 +151,7 @@ func migrateNodes(ctx context.Context, neoDB, pgDB graph.Database) (map[graph.ID } func migrateEdges(ctx context.Context, neoDB, pgDB graph.Database, nodeIDMappings map[graph.ID]graph.ID) error { - defer log.LogAndMeasure(log.LevelInfo, "Migrating edges from Neo4j to PostgreSQL")() + defer measure.ContextLogAndMeasure(ctx, slog.LevelInfo, "Migrating edges from Neo4j to PostgreSQL")() return neoDB.ReadTransaction(ctx, func(tx graph.Transaction) error { return tx.Relationships().Fetch(func(cursor graph.Cursor[*graph.Relationship]) error { diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 0f50e36f43..4b73b8ec9a 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -20,6 +20,8 @@ import ( "context" "errors" "fmt" + "github.com/specterops/bloodhound/log/measure" + "log/slog" "time" "github.com/specterops/bloodhound/cache" @@ -80,7 +82,7 @@ func (s *Daemon) analyze() { return } - defer log.LogAndMeasure(log.LevelInfo, "Graph Analysis")() + defer measure.LogAndMeasure(slog.LevelInfo, "Graph Analysis")() if err := RunAnalysisOperations(s.ctx, s.db, s.graphdb, s.cfg); err != nil { if errors.Is(err, ErrAnalysisFailed) { diff --git a/cmd/api/src/migrations/manifest.go b/cmd/api/src/migrations/manifest.go index 67d309c7d1..2c96323aca 100644 --- a/cmd/api/src/migrations/manifest.go +++ b/cmd/api/src/migrations/manifest.go @@ -20,6 +20,8 @@ import ( "context" "errors" "fmt" + "github.com/specterops/bloodhound/log/measure" + "log/slog" "strings" "time" @@ -50,7 +52,7 @@ func RequiresMigration(ctx context.Context, db graph.Database) (bool, error) { // Version_620_Migration is 
intended to rename the RemoteInteractiveLogonPrivilege edge to RemoteInteractiveLogonRight // See: https://specterops.atlassian.net/browse/BED-4428 func Version_620_Migration(db graph.Database) error { - defer log.LogAndMeasure(log.LevelInfo, "Migration to rename RemoteInteractiveLogonPrivilege edges")() + defer measure.LogAndMeasure(slog.LevelInfo, "Migration to rename RemoteInteractiveLogonPrivilege edges")() // MATCH p=(n:Base)-[:RemoteInteractiveLogonPrivilege]->(m:Base) RETURN p targetCriteria := query.And( @@ -91,7 +93,7 @@ func Version_620_Migration(db graph.Database) error { // node.Kinds = Kinds{ad.Entity, ad.User, ad.Computer} must be reset to: // node.Kinds = Kinds{ad.Entity} func Version_513_Migration(db graph.Database) error { - defer log.LogAndMeasure(log.LevelInfo, "Migration to remove incorrectly ingested labels")() + defer measure.LogAndMeasure(slog.LevelInfo, "Migration to remove incorrectly ingested labels")() // Cypher for the below filter is: size(labels(n)) > 2 and not (n:Group and n:ADLocalGroup) or size(labels(n)) > 3 and (n:Group and n:ADLocalGroup) targetCriteria := query.Or( diff --git a/packages/go/analysis/ad/queries.go b/packages/go/analysis/ad/queries.go index 71a09d7853..3abedb485c 100644 --- a/packages/go/analysis/ad/queries.go +++ b/packages/go/analysis/ad/queries.go @@ -19,6 +19,8 @@ package ad import ( "context" "fmt" + "github.com/specterops/bloodhound/log/measure" + "log/slog" "strings" "time" @@ -111,7 +113,7 @@ func FetchAllDomains(ctx context.Context, db graph.Database) ([]*graph.Node, err } func FetchActiveDirectoryTierZeroRoots(ctx context.Context, db graph.Database, domain *graph.Node, autoTagT0ParentObjectsFlag bool) (graph.NodeSet, error) { - defer log.LogAndMeasure(log.LevelInfo, "FetchActiveDirectoryTierZeroRoots")() + defer measure.ContextLogAndMeasure(ctx, slog.LevelInfo, "FetchActiveDirectoryTierZeroRoots")() if domainSID, err := domain.Properties.Get(common.ObjectID.String()).String(); err != nil { return nil, err 
diff --git a/packages/go/analysis/azure/queries.go b/packages/go/analysis/azure/queries.go index 461c0ca996..a9c4489f2b 100644 --- a/packages/go/analysis/azure/queries.go +++ b/packages/go/analysis/azure/queries.go @@ -19,6 +19,8 @@ package azure import ( "context" "fmt" + "github.com/specterops/bloodhound/log/measure" + "log/slog" "strings" "github.com/RoaringBitmap/roaring/roaring64" @@ -55,7 +57,7 @@ func GetCollectedTenants(ctx context.Context, db graph.Database) (graph.NodeSet, } func FetchGraphDBTierZeroTaggedAssets(tx graph.Transaction, tenant *graph.Node) (graph.NodeSet, error) { - defer log.LogAndMeasure(log.LevelInfo, "Tenant %d FetchGraphDBTierZeroTaggedAssets", tenant.ID)() + defer measure.LogAndMeasure(slog.LevelInfo, "FetchGraphDBTierZeroTaggedAssets", "tenant_id", tenant.ID)() if tenantObjectID, err := tenant.Properties.Get(common.ObjectID.String()).String(); err != nil { log.Errorf(fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) @@ -76,7 +78,7 @@ func FetchGraphDBTierZeroTaggedAssets(tx graph.Transaction, tenant *graph.Node) } func FetchAzureAttackPathRoots(tx graph.Transaction, tenant *graph.Node) (graph.NodeSet, error) { - defer log.LogAndMeasure(log.LevelDebug, "Tenant %d FetchAzureAttackPathRoots", tenant.ID)() + defer measure.LogAndMeasure(slog.LevelDebug, "FetchAzureAttackPathRoots", "tenant_id", tenant.ID)() attackPathRoots := graph.NewNodeKindSet() diff --git a/packages/go/analysis/azure/role.go b/packages/go/analysis/azure/role.go index 933a39ff6b..2d1058d741 100644 --- a/packages/go/analysis/azure/role.go +++ b/packages/go/analysis/azure/role.go @@ -19,6 +19,8 @@ package azure import ( "context" "fmt" + "github.com/specterops/bloodhound/log/measure" + "log/slog" "slices" "github.com/specterops/bloodhound/dawgs/cardinality" @@ -26,7 +28,6 @@ import ( "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" 
"github.com/specterops/bloodhound/graphschema/azure" - "github.com/specterops/bloodhound/log" ) func NewRoleEntityDetails(node *graph.Node) RoleDetails { @@ -270,7 +271,7 @@ func roleMembers(tx graph.Transaction, tenantRoles graph.NodeSet, additionalRela // RoleMembersWithGrants returns the NodeSet of members for a given set of roles, including those members who may be able to grant themselves one of the given roles // NOTE: The current implementation also includes the role nodes in the returned set. It may be worth considering removing those nodes from the set if doing so doesn't break tier zero/high value assignment func RoleMembersWithGrants(tx graph.Transaction, tenant *graph.Node, roleTemplateIDs ...string) (graph.NodeSet, error) { - defer log.LogAndMeasure(log.LevelInfo, "Tenant %d RoleMembersWithGrants", tenant.ID)() + defer measure.LogAndMeasure(slog.LevelInfo, "RoleMembersWithGrants", "tenant_id", tenant.ID)() if tenantRoles, err := TenantRoles(tx, tenant, roleTemplateIDs...); err != nil { return nil, err From 4532dcaf043f7afbe0f63b4788bf94f88ecd7358 Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Wed, 8 Jan 2025 19:54:35 -0500 Subject: [PATCH 08/20] BED-4153: Migrate to new Measure --- cmd/api/src/api/tools/pg.go | 2 +- cmd/api/src/api/v2/analysisrequest.go | 4 +++- cmd/api/src/api/v2/dataquality.go | 5 +++-- cmd/api/src/api/v2/file_uploads.go | 7 ++++--- cmd/api/src/daemons/datapipe/agi.go | 11 +++++++---- cmd/api/src/daemons/datapipe/datapipe.go | 4 ++-- cmd/api/src/migrations/manifest.go | 11 +++++------ cmd/api/src/queries/graph.go | 17 ++++++++--------- cmd/api/src/services/agi/agi.go | 5 ++++- cmd/api/src/services/dataquality/dataquality.go | 4 +++- packages/go/analysis/ad/ad.go | 11 +++++++---- packages/go/analysis/ad/membership.go | 4 +++- packages/go/analysis/ad/queries.go | 10 +++++----- packages/go/analysis/analysis.go | 5 +++-- packages/go/analysis/azure/queries.go | 2 +- packages/go/analysis/azure/role.go 
| 2 +- packages/go/analysis/azure/tenant.go | 5 +++-- packages/go/analysis/impact/aggregator.go | 5 ++++- packages/go/analysis/impact/id_aggregator.go | 4 +++- packages/go/analysis/post.go | 6 ++++-- packages/go/analysis/post_operation.go | 4 +++- packages/go/dawgs/ops/traversal.go | 5 +++-- packages/go/dawgs/traversal/traversal.go | 4 +++- packages/go/graphschema/ad/ad.go | 1 + packages/go/graphschema/azure/azure.go | 1 + packages/go/graphschema/common/common.go | 1 + packages/go/log/handlers/handlers.go | 16 ++++++++++++++++ packages/go/log/measure/measure.go | 16 ++++++++++++++++ 28 files changed, 118 insertions(+), 54 deletions(-) diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index ba69a70c74..8fb0c04ff2 100644 --- a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -19,7 +19,6 @@ package tools import ( "context" "fmt" - "github.com/specterops/bloodhound/log/measure" "log/slog" "net/http" "sync" @@ -31,6 +30,7 @@ import ( "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/util/size" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/config" ) diff --git a/cmd/api/src/api/v2/analysisrequest.go b/cmd/api/src/api/v2/analysisrequest.go index 1a6f1b3ddd..ed0b448158 100644 --- a/cmd/api/src/api/v2/analysisrequest.go +++ b/cmd/api/src/api/v2/analysisrequest.go @@ -20,9 +20,11 @@ import ( "database/sql" "errors" "fmt" + "log/slog" "net/http" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -40,7 +42,7 @@ func (s Resources) GetAnalysisRequest(response http.ResponseWriter, request *htt } func (s Resources) RequestAnalysis(response http.ResponseWriter, request *http.Request) { - defer log.Measure(log.LevelDebug, 
"Requesting analysis")() + defer measure.ContextMeasure(request.Context(), slog.LevelDebug, "Requesting analysis")() var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { diff --git a/cmd/api/src/api/v2/dataquality.go b/cmd/api/src/api/v2/dataquality.go index c82b728cb8..523925ad18 100644 --- a/cmd/api/src/api/v2/dataquality.go +++ b/cmd/api/src/api/v2/dataquality.go @@ -18,13 +18,14 @@ package v2 import ( "fmt" + "log/slog" "net/http" "strings" "github.com/gorilla/mux" "github.com/specterops/bloodhound/analysis/ad" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/utils" @@ -37,7 +38,7 @@ const ( ) func (s Resources) GetDatabaseCompleteness(response http.ResponseWriter, request *http.Request) { - defer log.Measure(log.LevelDebug, "Get Current Database Completeness")() + defer measure.ContextMeasure(request.Context(), slog.LevelDebug, "Get Current Database Completeness")() result := make(map[string]float64) diff --git a/cmd/api/src/api/v2/file_uploads.go b/cmd/api/src/api/v2/file_uploads.go index 3d19522fa4..b69299ff70 100644 --- a/cmd/api/src/api/v2/file_uploads.go +++ b/cmd/api/src/api/v2/file_uploads.go @@ -19,6 +19,7 @@ package v2 import ( "errors" "fmt" + "log/slog" "mime" "net/http" "slices" @@ -27,7 +28,7 @@ import ( "github.com/gorilla/mux" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -108,7 +109,7 @@ func (s Resources) ListFileUploadJobs(response http.ResponseWriter, request *htt } func (s Resources) StartFileUploadJob(response http.ResponseWriter, request *http.Request) { - 
defer log.Measure(log.LevelDebug, "Starting new file upload job")() + defer measure.ContextMeasure(request.Context(), slog.LevelDebug, "Starting new file upload job")() reqCtx := ctx.Get(request.Context()) if user, valid := auth.GetUserFromAuthCtx(reqCtx.AuthCtx); !valid { @@ -150,7 +151,7 @@ func (s Resources) ProcessFileUpload(response http.ResponseWriter, request *http } func (s Resources) EndFileUploadJob(response http.ResponseWriter, request *http.Request) { - defer log.Measure(log.LevelDebug, "Finished file upload job")() + defer measure.ContextMeasure(request.Context(), slog.LevelDebug, "Finished file upload job")() fileUploadJobIdString := mux.Vars(request)[FileUploadJobIdPathParameterName] diff --git a/cmd/api/src/daemons/datapipe/agi.go b/cmd/api/src/daemons/datapipe/agi.go index eed7e69135..707c1e3171 100644 --- a/cmd/api/src/daemons/datapipe/agi.go +++ b/cmd/api/src/daemons/datapipe/agi.go @@ -19,8 +19,11 @@ package datapipe import ( "context" "fmt" + "log/slog" "sync" + "github.com/specterops/bloodhound/log/measure" + commonanalysis "github.com/specterops/bloodhound/analysis" adAnalysis "github.com/specterops/bloodhound/analysis/ad" azureAnalysis "github.com/specterops/bloodhound/analysis/azure" @@ -38,7 +41,7 @@ import ( ) func updateAssetGroupIsolationTags(ctx context.Context, db agi.AgiData, graphDB graph.Database) error { - defer log.Measure(log.LevelInfo, "Updated asset group isolation tags")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Updated asset group isolation tags")() if err := commonanalysis.ClearSystemTags(ctx, graphDB); err != nil { return err @@ -48,7 +51,7 @@ func updateAssetGroupIsolationTags(ctx context.Context, db agi.AgiData, graphDB } func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { - defer log.Measure(log.LevelInfo, "Finished tagging Azure Tier Zero")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Finished tagging Azure Tier Zero")() var tenants graph.NodeSet @@ -157,7 +160,7 @@ func 
ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { } func TagActiveDirectoryTierZero(ctx context.Context, featureFlagProvider appcfg.GetFlagByKeyer, graphDB graph.Database) error { - defer log.Measure(log.LevelInfo, "Finished tagging Active Directory Tier Zero")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Finished tagging Active Directory Tier Zero")() if autoTagT0ParentObjectsFlag, err := featureFlagProvider.GetFlagByKey(ctx, appcfg.FeatureAutoTagT0ParentObjects); err != nil { return err @@ -184,7 +187,7 @@ func TagActiveDirectoryTierZero(ctx context.Context, featureFlagProvider appcfg. } func RunAssetGroupIsolationCollections(ctx context.Context, db database.Database, graphDB graph.Database, kindGetter func(*graph.Node) string) error { - defer log.Measure(log.LevelInfo, "Asset Group Isolation Collections")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Asset Group Isolation Collections")() if assetGroups, err := db.GetAllAssetGroups(ctx, "", model.SQLFilter{}); err != nil { return err diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 4b73b8ec9a..6e5086b5b8 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -20,13 +20,13 @@ import ( "context" "errors" "fmt" - "github.com/specterops/bloodhound/log/measure" "log/slog" "time" "github.com/specterops/bloodhound/cache" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" @@ -182,7 +182,7 @@ func (s *Daemon) deleteData() { _ = s.db.DeleteAnalysisRequest(s.ctx) _ = s.db.RequestAnalysis(s.ctx, "datapie") }() - defer log.Measure(log.LevelInfo, "Purge Graph Data Completed")() + defer measure.Measure(slog.LevelInfo, "Purge Graph Data Completed")() if err := 
s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusPurging, false); err != nil { log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) diff --git a/cmd/api/src/migrations/manifest.go b/cmd/api/src/migrations/manifest.go index 2c96323aca..85f1e88590 100644 --- a/cmd/api/src/migrations/manifest.go +++ b/cmd/api/src/migrations/manifest.go @@ -20,13 +20,11 @@ import ( "context" "errors" "fmt" - "github.com/specterops/bloodhound/log/measure" "log/slog" "strings" "time" "github.com/specterops/bloodhound/analysis" - "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" @@ -34,6 +32,7 @@ import ( "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/version" ) @@ -150,7 +149,7 @@ func Version_513_Migration(db graph.Database) error { } func Version_508_Migration(db graph.Database) error { - defer log.Measure(log.LevelInfo, "Migrating Azure Owns to Owner")() + defer measure.Measure(slog.LevelInfo, "Migrating Azure Owns to Owner")() return db.BatchOperation(context.Background(), func(batch graph.Batch) error { return batch.Relationships().Filterf(func() graph.Criteria { @@ -183,7 +182,7 @@ func Version_508_Migration(db graph.Database) error { } func Version_277_Migration(db graph.Database) error { - defer log.Measure(log.LevelInfo, "Migrating node property casing")() + defer measure.Measure(slog.LevelInfo, "Migrating node property casing")() return db.BatchOperation(context.Background(), func(batch graph.Batch) error { if err := batch.Nodes().Filterf(func() graph.Criteria { @@ -259,7 +258,7 @@ var Manifest = []Migration{ { Version: version.Version{Major: 2, Minor: 3, Patch: 0}, Execute: func(db graph.Database) error { - defer log.Measure(log.LevelInfo, "Deleting all existing role nodes")() + defer 
measure.Measure(slog.LevelInfo, "Deleting all existing role nodes")() return db.WriteTransaction(context.Background(), func(tx graph.Transaction) error { return tx.Nodes().Filterf(func() graph.Criteria { @@ -271,7 +270,7 @@ var Manifest = []Migration{ { Version: version.Version{Major: 2, Minor: 6, Patch: 3}, Execute: func(db graph.Database) error { - defer log.Measure(log.LevelInfo, "Deleting all LocalToComputer/RemoteInteractiveLogin edges and ADLocalGroup labels")() + defer measure.Measure(slog.LevelInfo, "Deleting all LocalToComputer/RemoteInteractiveLogin edges and ADLocalGroup labels")() return db.WriteTransaction(context.Background(), func(tx graph.Transaction) error { //Remove ADLocalGroup label from all nodes that also have the group label diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index 81d5b4b58b..10ed4f374f 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -23,6 +23,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "net/url" "sort" @@ -31,28 +32,26 @@ import ( "sync" "time" - "github.com/specterops/bloodhound/dawgs/util" - - "github.com/specterops/bloodhound/cypher/models/cypher/format" - "github.com/specterops/bloodhound/src/config" - "github.com/specterops/bloodhound/src/services/agi" - - bhCtx "github.com/specterops/bloodhound/src/ctx" - "github.com/gorilla/mux" "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/cache" "github.com/specterops/bloodhound/cypher/analyzer" "github.com/specterops/bloodhound/cypher/frontend" + "github.com/specterops/bloodhound/cypher/models/cypher/format" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" + "github.com/specterops/bloodhound/dawgs/util" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" 
"github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api/bloodhoundgraph" + "github.com/specterops/bloodhound/src/config" + bhCtx "github.com/specterops/bloodhound/src/ctx" "github.com/specterops/bloodhound/src/model" + "github.com/specterops/bloodhound/src/services/agi" "github.com/specterops/bloodhound/src/utils" ) @@ -239,7 +238,7 @@ func (s *GraphQuery) GetAssetGroupNodes(ctx context.Context, assetGroupTag strin } func (s *GraphQuery) GetAllShortestPaths(ctx context.Context, startNodeID string, endNodeID string, filter graph.Criteria) (graph.PathSet, error) { - defer log.Measure(log.LevelInfo, "GetAllShortestPaths")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "GetAllShortestPaths")() var paths graph.PathSet diff --git a/cmd/api/src/services/agi/agi.go b/cmd/api/src/services/agi/agi.go index d6a1092c7c..4aeca7d60f 100644 --- a/cmd/api/src/services/agi/agi.go +++ b/cmd/api/src/services/agi/agi.go @@ -20,9 +20,12 @@ package agi import ( "context" "fmt" + "log/slog" "slices" "strings" + "github.com/specterops/bloodhound/log/measure" + "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" @@ -72,7 +75,7 @@ func FetchAssetGroupNodes(tx graph.Transaction, assetGroupTag string, isSystemGr } func RunAssetGroupIsolationCollections(ctx context.Context, db AgiData, graphDB graph.Database) error { - defer log.Measure(log.LevelInfo, "Asset Group Isolation Collections")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Asset Group Isolation Collections")() if assetGroups, err := db.GetAllAssetGroups(ctx, "", model.SQLFilter{}); err != nil { return err diff --git a/cmd/api/src/services/dataquality/dataquality.go b/cmd/api/src/services/dataquality/dataquality.go index 606e09e500..6e8dd69c4a 100644 --- a/cmd/api/src/services/dataquality/dataquality.go +++ b/cmd/api/src/services/dataquality/dataquality.go @@ 
-20,9 +20,11 @@ package dataquality import ( "context" "fmt" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/analysis/ad" "github.com/specterops/bloodhound/src/analysis/azure" "github.com/specterops/bloodhound/src/model" @@ -37,7 +39,7 @@ type DataQualityData interface { func SaveDataQuality(ctx context.Context, db DataQualityData, graphDB graph.Database) error { log.Infof(fmt.Sprintf("Started Data Quality Stats Collection")) - defer log.Measure(log.LevelInfo, "Successfully Completed Data Quality Stats Collection")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Successfully Completed Data Quality Stats Collection")() if stats, aggregation, err := ad.GraphStats(ctx, graphDB); err != nil { return fmt.Errorf("could not get active directory data quality stats: %w", err) diff --git a/packages/go/analysis/ad/ad.go b/packages/go/analysis/ad/ad.go index 340dc36522..70aa461f1f 100644 --- a/packages/go/analysis/ad/ad.go +++ b/packages/go/analysis/ad/ad.go @@ -19,10 +19,13 @@ package ad import ( "context" "fmt" + "log/slog" "sort" "strings" "time" + "github.com/specterops/bloodhound/log/measure" + "github.com/specterops/bloodhound/analysis/impact" "github.com/specterops/bloodhound/dawgs/cardinality" @@ -74,7 +77,7 @@ func TierZeroWellKnownSIDSuffixes() []string { } func FetchWellKnownTierZeroEntities(ctx context.Context, db graph.Database, domainSID string) (graph.NodeSet, error) { - defer log.Measure(log.LevelInfo, "FetchWellKnownTierZeroEntities")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "FetchWellKnownTierZeroEntities")() nodes := graph.NewNodeSet() @@ -119,7 +122,7 @@ func FetchWellKnownTierZeroEntities(ctx context.Context, db graph.Database, doma } func FixWellKnownNodeTypes(ctx context.Context, db graph.Database) error { - defer log.Measure(log.LevelInfo, "Fix well known node types")() + defer 
measure.ContextMeasure(ctx, slog.LevelInfo, "Fix well known node types")() groupSuffixes := []string{EnterpriseKeyAdminsGroupSIDSuffix, KeyAdminsGroupSIDSuffix, @@ -158,7 +161,7 @@ func FixWellKnownNodeTypes(ctx context.Context, db graph.Database) error { } func RunDomainAssociations(ctx context.Context, db graph.Database) error { - defer log.Measure(log.LevelInfo, "Domain Associations")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Domain Associations")() return db.WriteTransaction(ctx, func(tx graph.Transaction) error { if domainNamesByObjectID, err := grabDomainInformation(tx); err != nil { @@ -217,7 +220,7 @@ func grabDomainInformation(tx graph.Transaction) (map[string]string, error) { } func LinkWellKnownGroups(ctx context.Context, db graph.Database) error { - defer log.Measure(log.LevelInfo, "Link well known groups")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Link well known groups")() var ( errors = util.NewErrorCollector() diff --git a/packages/go/analysis/ad/membership.go b/packages/go/analysis/ad/membership.go index c7370c7132..75f732d4fd 100644 --- a/packages/go/analysis/ad/membership.go +++ b/packages/go/analysis/ad/membership.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/analysis/impact" @@ -29,10 +30,11 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) func ResolveAllGroupMemberships(ctx context.Context, db graph.Database, additionalCriteria ...graph.Criteria) (impact.PathAggregator, error) { - defer log.Measure(log.LevelInfo, "ResolveAllGroupMemberships")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "ResolveAllGroupMemberships")() var ( adGroupIDs []graph.ID diff --git a/packages/go/analysis/ad/queries.go b/packages/go/analysis/ad/queries.go index 3abedb485c..3e1a37f28a 
100644 --- a/packages/go/analysis/ad/queries.go +++ b/packages/go/analysis/ad/queries.go @@ -19,7 +19,6 @@ package ad import ( "context" "fmt" - "github.com/specterops/bloodhound/log/measure" "log/slog" "strings" "time" @@ -35,10 +34,11 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) func FetchGraphDBTierZeroTaggedAssets(ctx context.Context, db graph.Database, domainSID string) (graph.NodeSet, error) { - defer log.Measure(log.LevelInfo, "FetchGraphDBTierZeroTaggedAssets")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "FetchGraphDBTierZeroTaggedAssets")() var ( nodes graph.NodeSet @@ -60,7 +60,7 @@ func FetchGraphDBTierZeroTaggedAssets(ctx context.Context, db graph.Database, do } func FetchAllEnforcedGPOs(ctx context.Context, db graph.Database, targets graph.NodeSet) (graph.NodeSet, error) { - defer log.Measure(log.LevelInfo, "FetchAllEnforcedGPOs")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "FetchAllEnforcedGPOs")() enforcedGPOs := graph.NewNodeSet() @@ -78,7 +78,7 @@ func FetchAllEnforcedGPOs(ctx context.Context, db graph.Database, targets graph. 
} func FetchOUContainers(ctx context.Context, db graph.Database, targets graph.NodeSet) (graph.NodeSet, error) { - defer log.Measure(log.LevelInfo, "FetchOUContainers")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "FetchOUContainers")() oUs := graph.NewNodeSet() @@ -1514,7 +1514,7 @@ func FetchUserSessionCompleteness(tx graph.Transaction, domainSIDs ...string) (f } func FetchAllGroupMembers(ctx context.Context, db graph.Database, targets graph.NodeSet) (graph.NodeSet, error) { - defer log.Measure(log.LevelInfo, "FetchAllGroupMembers")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "FetchAllGroupMembers")() log.Infof(fmt.Sprintf("Fetching group members for %d AD nodes", len(targets))) diff --git a/packages/go/analysis/analysis.go b/packages/go/analysis/analysis.go index 6c1f2028b5..af375e65cf 100644 --- a/packages/go/analysis/analysis.go +++ b/packages/go/analysis/analysis.go @@ -19,6 +19,7 @@ package analysis import ( "context" "fmt" + "log/slog" "slices" "github.com/specterops/bloodhound/dawgs/graph" @@ -27,7 +28,7 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/slicesext" ) @@ -89,7 +90,7 @@ func GetNodeKind(node *graph.Node) graph.Kind { } func ClearSystemTags(ctx context.Context, db graph.Database) error { - defer log.Measure(log.LevelInfo, "ClearSystemTagsIncludeMeta")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "ClearSystemTagsIncludeMeta")() var ( props = graph.NewProperties() diff --git a/packages/go/analysis/azure/queries.go b/packages/go/analysis/azure/queries.go index a9c4489f2b..b5b08d4469 100644 --- a/packages/go/analysis/azure/queries.go +++ b/packages/go/analysis/azure/queries.go @@ -19,7 +19,6 @@ package azure import ( "context" "fmt" - "github.com/specterops/bloodhound/log/measure" 
"log/slog" "strings" @@ -31,6 +30,7 @@ import ( "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) func FetchCollectedTenants(tx graph.Transaction) (graph.NodeSet, error) { diff --git a/packages/go/analysis/azure/role.go b/packages/go/analysis/azure/role.go index 2d1058d741..cb384bcaca 100644 --- a/packages/go/analysis/azure/role.go +++ b/packages/go/analysis/azure/role.go @@ -19,7 +19,6 @@ package azure import ( "context" "fmt" - "github.com/specterops/bloodhound/log/measure" "log/slog" "slices" @@ -28,6 +27,7 @@ import ( "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/azure" + "github.com/specterops/bloodhound/log/measure" ) func NewRoleEntityDetails(node *graph.Node) RoleDetails { diff --git a/packages/go/analysis/azure/tenant.go b/packages/go/analysis/azure/tenant.go index e85db77e58..81ffa77468 100644 --- a/packages/go/analysis/azure/tenant.go +++ b/packages/go/analysis/azure/tenant.go @@ -19,12 +19,13 @@ package azure import ( "context" "fmt" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/azure" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) func NewTenantEntityDetails(node *graph.Node) TenantDetails { @@ -102,7 +103,7 @@ func FetchTenants(ctx context.Context, db graph.Database) (graph.NodeSet, error) // TenantRoles returns the NodeSet of roles for a given tenant that match one of the given role template IDs. If no role template ID is provided, then all of the tenant role nodes are returned in the NodeSet. 
func TenantRoles(tx graph.Transaction, tenant *graph.Node, roleTemplateIDs ...string) (graph.NodeSet, error) { - defer log.Measure(log.LevelInfo, "TenantRoles - Tenant %d", tenant.ID)() + defer measure.Measure(slog.LevelInfo, "TenantRoles - Tenant %d", tenant.ID)() if !IsTenantNode(tenant) { return nil, fmt.Errorf("cannot fetch tenant roles - node %d must be of kind %s", tenant.ID, azure.Tenant) diff --git a/packages/go/analysis/impact/aggregator.go b/packages/go/analysis/impact/aggregator.go index f19267f111..135ca3a6e3 100644 --- a/packages/go/analysis/impact/aggregator.go +++ b/packages/go/analysis/impact/aggregator.go @@ -18,6 +18,9 @@ package impact import ( "fmt" + "log/slog" + + "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" @@ -161,7 +164,7 @@ func (s Aggregator) resolve(targetID uint64) cardinality.Provider[uint64] { func (s Aggregator) Cardinality(targets ...uint64) cardinality.Provider[uint64] { log.Debugf(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) - defer log.Measure(log.LevelDebug, "Calculated pathMembers cardinality for %d targets", len(targets))() + defer measure.Measure(slog.LevelDebug, "Calculated pathMembers cardinality", "num_targets", len(targets))() impact := s.newCardinalityProvider() diff --git a/packages/go/analysis/impact/id_aggregator.go b/packages/go/analysis/impact/id_aggregator.go index ba069c6e3b..76c97a0adc 100644 --- a/packages/go/analysis/impact/id_aggregator.go +++ b/packages/go/analysis/impact/id_aggregator.go @@ -18,11 +18,13 @@ package impact import ( "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) type PathAggregator interface { @@ -213,7 +215,7 @@ func (s IDA) resolve(targetID uint64) cardinality.Provider[uint64] { func (s 
IDA) Cardinality(targets ...uint64) cardinality.Provider[uint64] { log.Debugf(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) - defer log.Measure(log.LevelDebug, "Calculated pathMembers cardinality for %d targets", len(targets))() + defer measure.Measure(slog.LevelDebug, "Calculated pathMembers cardinality for %d targets", len(targets))() impact := s.newCardinalityProvider() diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index 446e12b8fd..7df0894452 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -19,6 +19,7 @@ package analysis import ( "context" "fmt" + "log/slog" "sort" "github.com/specterops/bloodhound/dawgs/graph" @@ -27,6 +28,7 @@ import ( "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/common" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) func statsSortedKeys(value map[graph.Kind]int) []graph.Kind { @@ -122,7 +124,7 @@ type DeleteRelationshipJob struct { } func DeleteTransitEdges(ctx context.Context, db graph.Database, baseKinds graph.Kinds, targetRelationships ...graph.Kind) (*AtomicPostProcessingStats, error) { - defer log.Measure(log.LevelInfo, "Finished deleting transit edges")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Finished deleting transit edges")() var ( relationshipIDs []graph.ID @@ -172,7 +174,7 @@ func NodesWithoutRelationshipsFilter() graph.Criteria { } func ClearOrphanedNodes(ctx context.Context, db graph.Database) error { - defer log.Measure(log.LevelInfo, "Finished deleting orphaned nodes")() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "Finished deleting orphaned nodes")() var operation = ops.StartNewOperation[graph.ID](ops.OperationContext{ Parent: ctx, diff --git a/packages/go/analysis/post_operation.go b/packages/go/analysis/post_operation.go index 1f4fd464f8..d289a86854 100644 --- a/packages/go/analysis/post_operation.go +++ 
b/packages/go/analysis/post_operation.go @@ -19,6 +19,7 @@ package analysis import ( "context" "fmt" + "log/slog" "sync" "sync/atomic" "time" @@ -27,6 +28,7 @@ import ( "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/graphschema/common" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) type StatTrackedOperation[T any] struct { @@ -38,7 +40,7 @@ func NewPostRelationshipOperation(ctx context.Context, db graph.Database, operat operation := StatTrackedOperation[CreatePostRelationshipJob]{} operation.NewOperation(ctx, db) operation.Operation.SubmitWriter(func(ctx context.Context, batch graph.Batch, inC <-chan CreatePostRelationshipJob) error { - defer log.Measure(log.LevelInfo, operationName)() + defer measure.ContextMeasure(ctx, slog.LevelInfo, operationName)() var ( relProp = NewPropertiesWithLastSeen() diff --git a/packages/go/dawgs/ops/traversal.go b/packages/go/dawgs/ops/traversal.go index 21bca9f387..7838d12645 100644 --- a/packages/go/dawgs/ops/traversal.go +++ b/packages/go/dawgs/ops/traversal.go @@ -19,11 +19,12 @@ package ops import ( "errors" "fmt" + "log/slog" "github.com/RoaringBitmap/roaring/roaring64" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/query" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) type LimitSkipTracker struct { @@ -143,7 +144,7 @@ type TraversalContext struct { } func Traversal(tx graph.Transaction, plan TraversalPlan, pathVisitor PathVisitor) error { - defer log.Measure(log.LevelInfo, "Node %d Traversal", plan.Root.ID)() + defer measure.Measure(slog.LevelInfo, "Node %d Traversal", plan.Root.ID)() var ( requireTraversalOrder = plan.Limit > 0 || plan.Skip > 0 diff --git a/packages/go/dawgs/traversal/traversal.go b/packages/go/dawgs/traversal/traversal.go index fc0eeca30c..538c0131f9 100644 --- a/packages/go/dawgs/traversal/traversal.go +++ b/packages/go/dawgs/traversal/traversal.go 
@@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "sync" "sync/atomic" @@ -32,6 +33,7 @@ import ( "github.com/specterops/bloodhound/dawgs/util/atomics" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/log/measure" ) // Driver is a function that drives sending queries to the graph and retrieving vertexes and edges. Traversal @@ -285,7 +287,7 @@ func New(db graph.Database, numParallelWorkers int) Traversal { } func (s Traversal) BreadthFirst(ctx context.Context, plan Plan) error { - defer log.Measure(log.LevelDebug, "BreadthFirst - %d workers", s.numWorkers)() + defer measure.ContextMeasure(ctx, slog.LevelDebug, "BreadthFirst - %d workers", s.numWorkers)() var ( // workerWG keeps count of background workers launched in goroutines diff --git a/packages/go/graphschema/ad/ad.go b/packages/go/graphschema/ad/ad.go index 20e66a8b4f..4acf12c18e 100644 --- a/packages/go/graphschema/ad/ad.go +++ b/packages/go/graphschema/ad/ad.go @@ -21,6 +21,7 @@ package ad import ( "errors" + graph "github.com/specterops/bloodhound/dawgs/graph" ) diff --git a/packages/go/graphschema/azure/azure.go b/packages/go/graphschema/azure/azure.go index 00b20f190f..787ee392e6 100644 --- a/packages/go/graphschema/azure/azure.go +++ b/packages/go/graphschema/azure/azure.go @@ -21,6 +21,7 @@ package azure import ( "errors" + graph "github.com/specterops/bloodhound/dawgs/graph" ) diff --git a/packages/go/graphschema/common/common.go b/packages/go/graphschema/common/common.go index 631871c6bf..73edf123fa 100644 --- a/packages/go/graphschema/common/common.go +++ b/packages/go/graphschema/common/common.go @@ -21,6 +21,7 @@ package common import ( "errors" + graph "github.com/specterops/bloodhound/dawgs/graph" ) diff --git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go index d44a860cbc..f37bcaa3c9 100644 --- a/packages/go/log/handlers/handlers.go +++ 
b/packages/go/log/handlers/handlers.go @@ -1,3 +1,19 @@ +// Copyright 2025 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + package handlers import ( diff --git a/packages/go/log/measure/measure.go b/packages/go/log/measure/measure.go index ce730fcb18..f940b34ec2 100644 --- a/packages/go/log/measure/measure.go +++ b/packages/go/log/measure/measure.go @@ -1,3 +1,19 @@ +// Copyright 2025 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + package measure import ( From 3b3da8500e5f2603c0d48feaa132544ad07cee69 Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Wed, 8 Jan 2025 22:23:17 -0500 Subject: [PATCH 09/20] BED-4153: Migrate log.Info --- cmd/api/src/api/auth.go | 13 +++--- cmd/api/src/api/middleware/compression.go | 3 +- cmd/api/src/api/tools/dbswitch.go | 4 +- cmd/api/src/api/tools/pg.go | 10 ++--- cmd/api/src/api/v2/apiclient/apiclient.go | 6 +-- cmd/api/src/bootstrap/initializer.go | 7 ++- cmd/api/src/bootstrap/server.go | 12 ++--- cmd/api/src/bootstrap/util.go | 7 +-- cmd/api/src/config/config.go | 5 ++- cmd/api/src/daemons/daemon.go | 5 ++- cmd/api/src/daemons/datapipe/cleanup.go | 7 +-- cmd/api/src/daemons/datapipe/datapipe.go | 4 +- cmd/api/src/database/analysisrequest.go | 5 ++- cmd/api/src/database/log.go | 28 ++++++------ cmd/api/src/database/migration/stepwise.go | 10 ++--- cmd/api/src/migrations/graph.go | 7 +-- cmd/api/src/migrations/manifest.go | 6 +-- cmd/api/src/model/samlprovider.go | 3 +- cmd/api/src/queries/graph.go | 4 +- .../src/services/dataquality/dataquality.go | 3 +- cmd/api/src/services/entrypoint.go | 3 +- packages/go/analysis/ad/adcscache.go | 3 +- packages/go/analysis/ad/membership.go | 3 +- packages/go/analysis/ad/post.go | 7 +-- packages/go/analysis/ad/queries.go | 5 +-- .../models/pgsql/translate/expression.go | 5 +-- packages/go/dawgs/drivers/neo4j/index.go | 6 +-- .../go/dawgs/drivers/neo4j/transaction.go | 3 +- packages/go/dawgs/drivers/pg/batch.go | 4 +- packages/go/dawgs/drivers/pg/tooling.go | 5 +-- packages/go/log/cmd/logtest/main.go | 11 ++--- packages/go/log/handlers/handlers.go | 31 +++++++++++++ packages/go/log/log.go | 45 +------------------ packages/go/schemagen/main.go | 3 +- packages/go/stbernard/analyzers/js/js.go | 6 +-- packages/go/stbernard/cmdrunner/cmdrunner.go | 5 ++- .../go/stbernard/command/tester/tester.go | 3 +- .../go/stbernard/environment/environment.go | 5 
+-- packages/go/stbernard/git/git.go | 9 ++-- packages/go/stbernard/main.go | 3 +- .../go/stbernard/workspace/golang/build.go | 5 ++- 41 files changed, 158 insertions(+), 161 deletions(-) diff --git a/cmd/api/src/api/auth.go b/cmd/api/src/api/auth.go index 9c57508987..b950a61906 100644 --- a/cmd/api/src/api/auth.go +++ b/cmd/api/src/api/auth.go @@ -27,6 +27,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "strconv" "strings" @@ -417,7 +418,7 @@ func (s authenticator) CreateSession(ctx context.Context, user model.User, authP return "", ErrUserDisabled } - log.Infof(fmt.Sprintf("Creating session for user: %s(%s)", user.ID, user.PrincipalName)) + slog.InfoContext(ctx, fmt.Sprintf("Creating session for user: %s(%s)", user.ID, user.PrincipalName)) userSession := model.UserSession{ User: user, @@ -475,16 +476,16 @@ func (s authenticator) ValidateSession(ctx context.Context, jwtTokenString strin return auth.Context{}, err } else if !token.Valid { - log.Infof(fmt.Sprintf("Token invalid")) + slog.InfoContext(ctx, fmt.Sprintf("Token invalid")) return auth.Context{}, ErrInvalidAuth } else if sessionID, err := claims.SessionID(); err != nil { - log.Infof(fmt.Sprintf("Session ID %s invalid: %v", claims.Id, err)) + slog.InfoContext(ctx, fmt.Sprintf("Session ID %s invalid: %v", claims.Id, err)) return auth.Context{}, ErrInvalidAuth } else if session, err := s.db.GetUserSession(ctx, sessionID); err != nil { - log.Infof(fmt.Sprintf("Unable to find session %d", sessionID)) + slog.InfoContext(ctx, fmt.Sprintf("Unable to find session %d", sessionID)) return auth.Context{}, ErrInvalidAuth } else if session.Expired() { - log.Infof(fmt.Sprintf("Session %d is expired", sessionID)) + slog.InfoContext(ctx, fmt.Sprintf("Session %d is expired", sessionID)) return auth.Context{}, ErrInvalidAuth } else { authContext := auth.Context{ @@ -493,7 +494,7 @@ func (s authenticator) ValidateSession(ctx context.Context, jwtTokenString strin } if session.AuthProviderType == 
model.SessionAuthProviderSecret && session.User.AuthSecret == nil { - log.Infof(fmt.Sprintf("No auth secret found for user ID %s", session.UserID.String())) + slog.InfoContext(ctx, fmt.Sprintf("No auth secret found for user ID %s", session.UserID.String())) return auth.Context{}, ErrNoUserSecret } else if session.AuthProviderType == model.SessionAuthProviderSecret && session.User.AuthSecret.Expired() { var ( diff --git a/cmd/api/src/api/middleware/compression.go b/cmd/api/src/api/middleware/compression.go index 7ee341ad7b..e78b0eb7f8 100644 --- a/cmd/api/src/api/middleware/compression.go +++ b/cmd/api/src/api/middleware/compression.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "strings" @@ -106,7 +107,7 @@ func wrapBody(encoding string, body io.ReadCloser) (io.ReadCloser, error) { case "deflate": newBody, err = zlib.NewReader(body) default: - log.Infof(fmt.Sprintf("Unsupported encoding detected: %s", encoding)) + slog.Info(fmt.Sprintf("Unsupported encoding detected: %s", encoding)) err = errUnsupportedEncoding } return newBody, err diff --git a/cmd/api/src/api/tools/dbswitch.go b/cmd/api/src/api/tools/dbswitch.go index 1d71a5c0ca..25d050c132 100644 --- a/cmd/api/src/api/tools/dbswitch.go +++ b/cmd/api/src/api/tools/dbswitch.go @@ -20,9 +20,9 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/jackc/pgx/v5" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/config" ) @@ -84,7 +84,7 @@ func LookupGraphDriver(ctx context.Context, cfg config.Configuration) (string, e if setDriverName, err := GetGraphDriver(ctx, pgxConn); err != nil { if errors.Is(err, pgx.ErrNoRows) { - log.Infof(fmt.Sprintf("No database driver has been set for migration, using: %s", driverName)) + slog.InfoContext(ctx, fmt.Sprintf("No database driver has been set for migration, using: %s", driverName)) } else { return "", err } diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index 8fb0c04ff2..c2027da65f 100644 --- 
a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -246,7 +246,7 @@ func (s *PGMigrator) SwitchPostgreSQL(response http.ResponseWriter, request *htt s.graphDBSwitch.Switch(pgDB) response.WriteHeader(http.StatusOK) - log.Infof(fmt.Sprintf("Updated default graph driver to PostgreSQL")) + slog.InfoContext(request.Context(), "Updated default graph driver to PostgreSQL") } } @@ -266,7 +266,7 @@ func (s *PGMigrator) SwitchNeo4j(response http.ResponseWriter, request *http.Req s.graphDBSwitch.Switch(neo4jDB) response.WriteHeader(http.StatusOK) - log.Infof(fmt.Sprintf("Updated default graph driver to Neo4j")) + slog.InfoContext(request.Context(), "Updated default graph driver to Neo4j") } } @@ -284,7 +284,7 @@ func (s *PGMigrator) startMigration() error { }); err != nil { return fmt.Errorf("failed connecting to PostgreSQL: %w", err) } else { - log.Infof(fmt.Sprintf("Dispatching live migration from Neo4j to PostgreSQL")) + slog.Info("Dispatching live migration from Neo4j to PostgreSQL") migrationCtx, migrationCancelFunc := context.WithCancel(s.serverCtx) s.migrationCancelFunc = migrationCancelFunc @@ -292,7 +292,7 @@ func (s *PGMigrator) startMigration() error { go func(ctx context.Context) { defer migrationCancelFunc() - log.Infof(fmt.Sprintf("Starting live migration from Neo4j to PostgreSQL")) + slog.InfoContext(ctx, fmt.Sprintf("Starting live migration from Neo4j to PostgreSQL")) if err := pgDB.AssertSchema(ctx, s.graphSchema); err != nil { log.Errorf(fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) @@ -303,7 +303,7 @@ func (s *PGMigrator) startMigration() error { } else if err := migrateEdges(ctx, neo4jDB, pgDB, nodeIDMappings); err != nil { log.Errorf(fmt.Sprintf("Failed importing edges into PostgreSQL: %v", err)) } else { - log.Infof(fmt.Sprintf("Migration to PostgreSQL completed successfully")) + slog.InfoContext(ctx, fmt.Sprintf("Migration to PostgreSQL completed successfully")) } if err := s.advanceState(stateIdle, stateMigrating, 
stateCanceling); err != nil { diff --git a/cmd/api/src/api/v2/apiclient/apiclient.go b/cmd/api/src/api/v2/apiclient/apiclient.go index 44801c0fe5..8841ccfb29 100644 --- a/cmd/api/src/api/v2/apiclient/apiclient.go +++ b/cmd/api/src/api/v2/apiclient/apiclient.go @@ -22,13 +22,13 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "net/http" "net/url" "time" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api" ) @@ -114,7 +114,7 @@ func (s Client) ZipRequest(method, path string, params url.Values, body []byte) return nil, fmt.Errorf("waited %f seconds while retrying - Request failure cause: %w", maxSleep.Seconds(), err) } - log.Infof(fmt.Sprintf("Request to %s failed with error: %v. Attempting a retry.", endpoint.String(), err)) + slog.Info(fmt.Sprintf("Request to %s failed with error: %v. Attempting a retry.", endpoint.String(), err)) time.Sleep(sleepInterval) } else { return response, nil @@ -170,7 +170,7 @@ func (s Client) Request(method, path string, params url.Values, body any, header return nil, fmt.Errorf("waited %f seconds while retrying - Request failure cause: %w", maxSleep.Seconds(), err) } - log.Infof(fmt.Sprintf("Request to %s failed with error: %v. Attempting a retry.", endpoint.String(), err)) + slog.Info(fmt.Sprintf("Request to %s failed with error: %v. 
Attempting a retry.", endpoint.String(), err)) time.Sleep(sleepInterval) } else { return response, nil diff --git a/cmd/api/src/bootstrap/initializer.go b/cmd/api/src/bootstrap/initializer.go index f55603419b..30afd423e7 100644 --- a/cmd/api/src/bootstrap/initializer.go +++ b/cmd/api/src/bootstrap/initializer.go @@ -19,9 +19,9 @@ package bootstrap import ( "context" "fmt" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/daemons" "github.com/specterops/bloodhound/src/database" @@ -86,11 +86,10 @@ func (s Initializer[DBType, GraphType]) Launch(parentCtx context.Context, handle } // Log successful start and wait for a signal to exit - log.Infof(fmt.Sprintf("Server started successfully")) + slog.InfoContext(ctx, "Server started successfully") <-ctx.Done() - log.Infof(fmt.Sprintf("Shutting down")) - + slog.InfoContext(ctx, "Shutting down") // TODO: Refactor this pattern in favor of context handling daemonManager.Stop() diff --git a/cmd/api/src/bootstrap/server.go b/cmd/api/src/bootstrap/server.go index bc123f11a2..1b7eedb288 100644 --- a/cmd/api/src/bootstrap/server.go +++ b/cmd/api/src/bootstrap/server.go @@ -19,6 +19,7 @@ package bootstrap import ( "context" "fmt" + "log/slog" "os" "os/signal" "strings" @@ -26,7 +27,6 @@ import ( "time" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" @@ -115,11 +115,11 @@ func MigrateDB(ctx context.Context, cfg config.Configuration, db database.Databa paddingString := strings.Repeat(" ", len(passwordMsg)-2) borderString := strings.Repeat("#", len(passwordMsg)) - log.Infof(fmt.Sprintf("%s", borderString)) - log.Infof(fmt.Sprintf("#%s#", paddingString)) - log.Infof(fmt.Sprintf("%s", passwordMsg)) - log.Infof(fmt.Sprintf("#%s#", 
paddingString)) - log.Infof(fmt.Sprintf("%s", borderString)) + slog.Info(fmt.Sprintf("%s", borderString)) + slog.Info(fmt.Sprintf("#%s#", paddingString)) + slog.Info(fmt.Sprintf("%s", passwordMsg)) + slog.Info(fmt.Sprintf("#%s#", paddingString)) + slog.Info(fmt.Sprintf("%s", borderString)) } } diff --git a/cmd/api/src/bootstrap/util.go b/cmd/api/src/bootstrap/util.go index 8fe3c18d14..6d6ffb3598 100644 --- a/cmd/api/src/bootstrap/util.go +++ b/cmd/api/src/bootstrap/util.go @@ -19,6 +19,7 @@ package bootstrap import ( "context" "fmt" + "log/slog" "os" "github.com/specterops/bloodhound/dawgs" @@ -80,11 +81,11 @@ func ConnectGraph(ctx context.Context, cfg config.Configuration) (*graph.Databas } else { switch driverName { case neo4j.DriverName: - log.Infof(fmt.Sprintf("Connecting to graph using Neo4j")) + slog.InfoContext(ctx, "Connecting to graph using Neo4j") connectionString = cfg.Neo4J.Neo4jConnectionString() case pg.DriverName: - log.Infof(fmt.Sprintf("Connecting to graph using PostgreSQL")) + slog.InfoContext(ctx, "Connecting to graph using PostgreSQL") connectionString = cfg.Database.PostgreSQLConnectionString() default: @@ -118,6 +119,6 @@ func InitializeLogging(cfg config.Configuration) error { log.Configure(log.DefaultConfiguration().WithLevel(logLevel)) - log.Infof(fmt.Sprintf("Logging configured")) + slog.Info("Logging configured") return nil } diff --git a/cmd/api/src/config/config.go b/cmd/api/src/config/config.go index a7426726bd..cb8b45c6c5 100644 --- a/cmd/api/src/config/config.go +++ b/cmd/api/src/config/config.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os" "path/filepath" "regexp" @@ -263,11 +264,11 @@ func getConfiguration(path string, defaultConfigFunc func() (Configuration, erro if hasCfgFile, err := HasConfigurationFile(path); err != nil { return Configuration{}, err } else if hasCfgFile { - log.Infof(fmt.Sprintf("Reading configuration found at %s", path)) + slog.Info(fmt.Sprintf("Reading configuration found at 
%s", path)) return ReadConfigurationFile(path) } else { - log.Infof(fmt.Sprintf("No configuration file found at %s. Returning defaults.", path)) + slog.Info(fmt.Sprintf("No configuration file found at %s. Returning defaults.", path)) return defaultConfigFunc() } diff --git a/cmd/api/src/daemons/daemon.go b/cmd/api/src/daemons/daemon.go index f133c7db1f..bbae5a1534 100644 --- a/cmd/api/src/daemons/daemon.go +++ b/cmd/api/src/daemons/daemon.go @@ -19,6 +19,7 @@ package daemons import ( "context" "fmt" + "log/slog" "sync" "time" @@ -49,7 +50,7 @@ func (s *Manager) Start(ctx context.Context, daemons ...Daemon) { defer s.daemonsLock.Unlock() for _, daemon := range daemons { - log.Infof(fmt.Sprintf("Starting daemon %s", daemon.Name())) + slog.InfoContext(ctx, fmt.Sprintf("Starting daemon %s", daemon.Name())) go daemon.Start(ctx) s.daemons = append(s.daemons, daemon) @@ -64,7 +65,7 @@ func (s *Manager) Stop() { defer cancel() for _, daemon := range s.daemons { - log.Infof(fmt.Sprintf("Shutting down daemon %s", daemon.Name())) + slog.Info(fmt.Sprintf("Shutting down daemon %s", daemon.Name())) if err := daemon.Stop(shutdownCtx); err != nil { log.Errorf(fmt.Sprintf("Failure caught while shutting down daemon %s: %v", daemon.Name(), err)) diff --git a/cmd/api/src/daemons/datapipe/cleanup.go b/cmd/api/src/daemons/datapipe/cleanup.go index 65578e84aa..aabcbeb426 100644 --- a/cmd/api/src/daemons/datapipe/cleanup.go +++ b/cmd/api/src/daemons/datapipe/cleanup.go @@ -21,6 +21,7 @@ package datapipe import ( "context" "fmt" + "log/slog" "os" "path/filepath" "strings" @@ -80,7 +81,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin // Release the lock once finished defer s.lock.Unlock() - log.Infof(fmt.Sprintf("Running OrphanFileSweeper for path %s", s.tempDirectoryRootPath)) + slog.InfoContext(ctx, fmt.Sprintf("Running OrphanFileSweeper for path %s", s.tempDirectoryRootPath)) log.Debugf(fmt.Sprintf("OrphanFileSweeper expected names %v", 
expectedFileNames)) if dirEntries, err := s.fileOps.ReadDir(s.tempDirectoryRootPath); err != nil { @@ -112,7 +113,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin break } - log.Infof(fmt.Sprintf("Removing orphaned file %s", orphanedDirEntry.Name())) + slog.InfoContext(ctx, fmt.Sprintf("Removing orphaned file %s", orphanedDirEntry.Name())) fullPath := filepath.Join(s.tempDirectoryRootPath, orphanedDirEntry.Name()) if err := s.fileOps.RemoveAll(fullPath); err != nil { @@ -123,7 +124,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin } if numDeleted > 0 { - log.Infof(fmt.Sprintf("Finished removing %d orphaned ingest files", numDeleted)) + slog.InfoContext(ctx, fmt.Sprintf("Finished removing %d orphaned ingest files", numDeleted)) } } } diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 6e5086b5b8..9ad4697ea3 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -119,7 +119,7 @@ func resetCache(cacher cache.Cache, _ bool) { if err := cacher.Reset(); err != nil { log.Errorf(fmt.Sprintf("Error while resetting the cache: %v", err)) } else { - log.Infof(fmt.Sprintf("Cache successfully reset by datapipe daemon")) + slog.Info("Cache successfully reset by datapipe daemon") } } @@ -189,7 +189,7 @@ func (s *Daemon) deleteData() { return } - log.Infof(fmt.Sprintf("Begin Purge Graph Data")) + slog.Info("Begin Purge Graph Data") if err := s.db.CancelAllFileUploads(s.ctx); err != nil { log.Errorf(fmt.Sprintf("Error cancelling jobs during data deletion: %v", err)) diff --git a/cmd/api/src/database/analysisrequest.go b/cmd/api/src/database/analysisrequest.go index f3306f1306..95a2dcd45d 100644 --- a/cmd/api/src/database/analysisrequest.go +++ b/cmd/api/src/database/analysisrequest.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "time" "github.com/specterops/bloodhound/log" @@ -93,12 +94,12 @@ 
func (s *BloodhoundDB) setAnalysisRequest(ctx context.Context, requestType model // RequestAnalysis will request an analysis be executed, as long as there isn't an existing analysis request or collected graph data deletion request, then it no-ops func (s *BloodhoundDB) RequestAnalysis(ctx context.Context, requestedBy string) error { - log.Infof(fmt.Sprintf("Analysis requested by %s", requestedBy)) + slog.InfoContext(ctx, fmt.Sprintf("Analysis requested by %s", requestedBy)) return s.setAnalysisRequest(ctx, model.AnalysisRequestAnalysis, requestedBy) } // RequestCollectedGraphDataDeletion will request collected graph data be deleted, if an analysis request is present, it will overwrite that. func (s *BloodhoundDB) RequestCollectedGraphDataDeletion(ctx context.Context, requestedBy string) error { - log.Infof(fmt.Sprintf("Collected graph data deletion requested by %s", requestedBy)) + slog.InfoContext(ctx, fmt.Sprintf("Collected graph data deletion requested by %s", requestedBy)) return s.setAnalysisRequest(ctx, model.AnalysisRequestDeletion, requestedBy) } diff --git a/cmd/api/src/database/log.go b/cmd/api/src/database/log.go index 55815253be..66b16420d5 100644 --- a/cmd/api/src/database/log.go +++ b/cmd/api/src/database/log.go @@ -20,6 +20,8 @@ import ( "context" "errors" "fmt" + "github.com/specterops/bloodhound/log/handlers" + "log/slog" "time" "github.com/specterops/bloodhound/log" @@ -37,20 +39,16 @@ func (s *GormLogAdapter) LogMode(level logger.LogLevel) logger.Interface { return s } -func (s GormLogAdapter) Log(event log.Event, msg string, data ...any) { - event.Msgf(msg, data...) -} - func (s *GormLogAdapter) Info(ctx context.Context, msg string, data ...any) { - s.Log(log.Info(), msg, data...) + slog.InfoContext(ctx, fmt.Sprintf(msg, data...)) } func (s *GormLogAdapter) Warn(ctx context.Context, msg string, data ...any) { - s.Log(log.Warn(), msg, data...) 
+ slog.WarnContext(ctx, fmt.Sprintf(msg, data...)) } func (s *GormLogAdapter) Error(ctx context.Context, msg string, data ...any) { - s.Log(log.Error(), msg, data...) + slog.ErrorContext(ctx, fmt.Sprintf(msg, data...)) } func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) { @@ -61,10 +59,10 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { sql, _ := fc() - if log.GlobalAccepts(log.LevelDebug) { - log.Error().Fault(err).Msgf("Database error for query: %s", sql) + if slog.Default().Enabled(ctx, slog.LevelDebug) { + slog.ErrorContext(ctx, "Database error", "query", sql, "err", err, handlers.GetSlogCallStack()) } else { - log.Error().Fault(err).Stack().Msgf("Database error for query: %s", sql) + slog.ErrorContext(ctx, "Database error", "query", sql, "err", err) } } else { elapsed := time.Since(begin) @@ -72,18 +70,18 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( if elapsed >= s.SlowQueryErrorThreshold { sql, rows := fc() - if log.GlobalAccepts(log.LevelDebug) { - log.Errorf(fmt.Sprintf("Slow database query took %d ms addressing %d rows: %s", elapsed.Milliseconds(), rows, sql)) + if slog.Default().Enabled(ctx, slog.LevelDebug) { + slog.ErrorContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "nums_rows", rows, "sql", sql, handlers.GetSlogCallStack()) } else { - log.Error().Stack().Msgf("Slow database query took %d ms addressing %d rows.", elapsed.Milliseconds(), rows) + slog.ErrorContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "num_rows", rows) } } else if elapsed >= s.SlowQueryWarnThreshold { sql, rows := fc() if log.GlobalAccepts(log.LevelDebug) { - log.Warnf(fmt.Sprintf("Slow database query took %d ms addressing %d rows: %s", elapsed.Milliseconds(), rows, sql)) + slog.WarnContext(ctx, "Slow database query", "duration_ms", 
elapsed.Milliseconds(), "nums_rows", rows, "sql", sql, handlers.GetSlogCallStack()) } else { - log.Warn().Stack().Msgf("Slow database query took %d ms addressing %d rows.", elapsed.Milliseconds(), rows) + slog.WarnContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "num_rows", rows) } } } diff --git a/cmd/api/src/database/migration/stepwise.go b/cmd/api/src/database/migration/stepwise.go index 24e241663e..39b3538400 100644 --- a/cmd/api/src/database/migration/stepwise.go +++ b/cmd/api/src/database/migration/stepwise.go @@ -19,8 +19,8 @@ package migration import ( "fmt" "io/fs" + "log/slog" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/version" "gorm.io/gorm" @@ -49,7 +49,7 @@ func (s *Migrator) ExecuteMigrations(manifest Manifest) error { } // execute the migration(s) for this version in a transaction - log.Infof(fmt.Sprintf("Executing SQL migrations for %s", versionString)) + slog.Info(fmt.Sprintf("Executing SQL migrations for %s", versionString)) if err := s.DB.Transaction(func(tx *gorm.DB) error { for _, migration := range manifest.Migrations[versionString] { @@ -113,7 +113,7 @@ ALTER TABLE ONLY migrations ALTER COLUMN id SET DEFAULT nextval('migrations_id_s ALTER TABLE ONLY migrations ADD CONSTRAINT migrations_pkey PRIMARY KEY (id);` ) - log.Infof(fmt.Sprintf("Creating migration schema...")) + slog.Info("Creating migration schema...") if err := s.DB.Transaction(func(tx *gorm.DB) error { if result := tx.Exec(createMigrationTableSql); result.Error != nil { return fmt.Errorf("failed to creation migration table: %w", result.Error) @@ -167,7 +167,7 @@ func (s *Migrator) ExecuteStepwiseMigrations() error { return fmt.Errorf("failed to check if migration table exists: %w", err) } else if !hasTable { // no migration table, assume this is new installation - log.Infof(fmt.Sprintf("This is a new SQL database. 
Initializing schema...")) + slog.Info("This is a new SQL database. Initializing schema...") //initialize migration schema and generate full manifest if err = s.CreateMigrationSchema(); err != nil { return fmt.Errorf("failed to create migration schema: %w", err) @@ -185,7 +185,7 @@ func (s *Migrator) ExecuteStepwiseMigrations() error { // run migrations using the manifest we generated if len(manifest.VersionTable) == 0 { - log.Infof(fmt.Sprintf("No new SQL migrations to run")) + slog.Info("No new SQL migrations to run") return nil } else if err := s.ExecuteMigrations(manifest); err != nil { return fmt.Errorf("could not execute migrations: %w", err) diff --git a/cmd/api/src/migrations/graph.go b/cmd/api/src/migrations/graph.go index c0158c267a..fb633ae47b 100644 --- a/cmd/api/src/migrations/graph.go +++ b/cmd/api/src/migrations/graph.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/query" @@ -144,13 +145,13 @@ func (s *GraphMigrator) executeMigrations(ctx context.Context, originalVersion v for _, nextMigration := range Manifest { if nextMigration.Version.GreaterThan(mostRecentVersion) { - log.Infof(fmt.Sprintf("Graph migration version %s is greater than current version %s", nextMigration.Version, mostRecentVersion)) + slog.InfoContext(ctx, fmt.Sprintf("Graph migration version %s is greater than current version %s", nextMigration.Version, mostRecentVersion)) if err := nextMigration.Execute(s.db); err != nil { return fmt.Errorf("migration version %s failed: %w", nextMigration.Version.String(), err) } - log.Infof(fmt.Sprintf("Graph migration version %s executed successfully", nextMigration.Version)) + slog.InfoContext(ctx, fmt.Sprintf("Graph migration version %s executed successfully", nextMigration.Version)) mostRecentVersion = nextMigration.Version } } @@ -167,7 +168,7 @@ func (s *GraphMigrator) executeStepwiseMigrations(ctx context.Context) error { if 
errors.Is(err, ErrNoMigrationData) { currentVersion := version.GetVersion() - log.Infof(fmt.Sprintf("This is a new graph database. Creating a migration entry for GraphDB version %s", currentVersion)) + slog.InfoContext(ctx, fmt.Sprintf("This is a new graph database. Creating a migration entry for GraphDB version %s", currentVersion)) return CreateMigrationData(ctx, s.db, currentMigration) } else { return fmt.Errorf("unable to get graph db migration data: %w", err) diff --git a/cmd/api/src/migrations/manifest.go b/cmd/api/src/migrations/manifest.go index 85f1e88590..641ed4ee2a 100644 --- a/cmd/api/src/migrations/manifest.go +++ b/cmd/api/src/migrations/manifest.go @@ -139,7 +139,7 @@ func Version_513_Migration(db graph.Database) error { } } - log.Infof(fmt.Sprintf("Migration removed all non-entity kinds from %d incorrectly labeled nodes", nodes.Len())) + slog.Info(fmt.Sprintf("Migration removed all non-entity kinds from %d incorrectly labeled nodes", nodes.Len())) return nil }); err != nil { return err @@ -240,11 +240,11 @@ func Version_277_Migration(db graph.Database) error { } if count++; count%10000 == 0 { - log.Infof(fmt.Sprintf("Completed %d nodes in migration", count)) + slog.Info(fmt.Sprintf("Completed %d nodes in migration", count)) } } - log.Infof(fmt.Sprintf("Completed %d nodes in migration", count)) + slog.Info(fmt.Sprintf("Completed %d nodes in migration", count)) return cursor.Error() }); err != nil { return err diff --git a/cmd/api/src/model/samlprovider.go b/cmd/api/src/model/samlprovider.go index e0455f7487..5c95e6ddfa 100644 --- a/cmd/api/src/model/samlprovider.go +++ b/cmd/api/src/model/samlprovider.go @@ -19,6 +19,7 @@ package model import ( "errors" "fmt" + "log/slog" "net/url" "path" @@ -154,7 +155,7 @@ func (s SAMLProvider) GetSAMLUserPrincipalNameFromAssertion(assertion *saml.Asse for _, attrStmt := range assertion.AttributeStatements { for _, attr := range attrStmt.Attributes { for _, value := range attr.Values { - 
log.Infof(fmt.Sprintf("[SAML] Assertion contains attribute: %s - %s=%v", attr.NameFormat, attr.Name, value)) + slog.Info(fmt.Sprintf("[SAML] Assertion contains attribute: %s - %s=%v", attr.NameFormat, attr.Name, value)) } } } diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index 10ed4f374f..c827ce95e2 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -630,7 +630,7 @@ func (s *GraphQuery) GetEntityCountResults(ctx context.Context, node *graph.Node for delegateKey, delegate := range delegates { waitGroup.Add(1) - log.Infof(fmt.Sprintf("Running entity query %s", delegateKey)) + slog.InfoContext(ctx, fmt.Sprintf("Running entity query %s", delegateKey)) go func(delegateKey string, delegate any) { defer waitGroup.Done() @@ -790,7 +790,7 @@ func (s *GraphQuery) cacheQueryResult(queryStart time.Time, cacheKey string, res } else if !set { log.Warnf(fmt.Sprintf("[Entity Results Cache] Cache entry for query %s not set because it already exists", cacheKey)) } else { - log.Infof(fmt.Sprintf("[Entity Results Cache] Cached slow query %s (%d bytes) because it took %dms", cacheKey, sizeInBytes, queryTime)) + slog.Info(fmt.Sprintf("[Entity Results Cache] Cached slow query %s (%d bytes) because it took %dms", cacheKey, sizeInBytes, queryTime)) } } } diff --git a/cmd/api/src/services/dataquality/dataquality.go b/cmd/api/src/services/dataquality/dataquality.go index 6e8dd69c4a..14f2b5f2d0 100644 --- a/cmd/api/src/services/dataquality/dataquality.go +++ b/cmd/api/src/services/dataquality/dataquality.go @@ -23,7 +23,6 @@ import ( "log/slog" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/analysis/ad" "github.com/specterops/bloodhound/src/analysis/azure" @@ -38,7 +37,7 @@ type DataQualityData interface { } func SaveDataQuality(ctx context.Context, db DataQualityData, graphDB graph.Database) error { - 
log.Infof(fmt.Sprintf("Started Data Quality Stats Collection")) + slog.InfoContext(ctx, fmt.Sprintf("Started Data Quality Stats Collection")) defer measure.ContextMeasure(ctx, slog.LevelInfo, "Successfully Completed Data Quality Stats Collection")() if stats, aggregation, err := ad.GraphStats(ctx, graphDB); err != nil { diff --git a/cmd/api/src/services/entrypoint.go b/cmd/api/src/services/entrypoint.go index e7f7ac4d51..49e8403fb5 100644 --- a/cmd/api/src/services/entrypoint.go +++ b/cmd/api/src/services/entrypoint.go @@ -19,6 +19,7 @@ package services import ( "context" "fmt" + "log/slog" "time" "github.com/specterops/bloodhound/cache" @@ -83,7 +84,7 @@ func Entrypoint(ctx context.Context, cfg config.Configuration, connections boots } else if err := connections.Graph.SetDefaultGraph(ctx, schema.DefaultGraph()); err != nil { return nil, fmt.Errorf("no default graph found but migrations are disabled per configuration: %w", err) } else { - log.Infof(fmt.Sprintf("Database migrations are disabled per configuration")) + slog.InfoContext(ctx, fmt.Sprintf("Database migrations are disabled per configuration")) } if apiCache, err := cache.NewCache(cache.Config{MaxSize: cfg.MaxAPICacheSize}); err != nil { diff --git a/packages/go/analysis/ad/adcscache.go b/packages/go/analysis/ad/adcscache.go index b2be8ca8e3..f53f3b261a 100644 --- a/packages/go/analysis/ad/adcscache.go +++ b/packages/go/analysis/ad/adcscache.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/dawgs/cardinality" @@ -147,7 +148,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris log.Errorf(fmt.Sprintf("Error building adcs cache %v", err)) } - log.Infof(fmt.Sprintf("Finished building adcs cache")) + slog.InfoContext(ctx, "Finished building adcs cache") } func (s *ADCSCache) DoesCAChainProperlyToDomain(enterpriseCA, domain *graph.Node) bool { diff --git a/packages/go/analysis/ad/membership.go 
b/packages/go/analysis/ad/membership.go index 75f732d4fd..5024c8ae18 100644 --- a/packages/go/analysis/ad/membership.go +++ b/packages/go/analysis/ad/membership.go @@ -29,7 +29,6 @@ import ( "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" ) @@ -64,7 +63,7 @@ func ResolveAllGroupMemberships(ctx context.Context, db graph.Database, addition return memberships, err } - log.Infof(fmt.Sprintf("Collected %d groups to resolve", len(adGroupIDs))) + slog.InfoContext(ctx, fmt.Sprintf("Collected %d groups to resolve", len(adGroupIDs))) for _, adGroupID := range adGroupIDs { if traversalMap.Contains(adGroupID.Uint64()) { diff --git a/packages/go/analysis/ad/post.go b/packages/go/analysis/ad/post.go index b1080cef7d..4530d33263 100644 --- a/packages/go/analysis/ad/post.go +++ b/packages/go/analysis/ad/post.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/RoaringBitmap/roaring/roaring64" "github.com/specterops/bloodhound/analysis" @@ -252,7 +253,7 @@ func PostLocalGroups(ctx context.Context, db graph.Database, localGroupExpansion computerID := graph.ID(computer) if idx > 0 && idx%10000 == 0 { - log.Infof(fmt.Sprintf("Post processed %d active directory computers", idx)) + slog.InfoContext(ctx, fmt.Sprintf("Post processed %d active directory computers", idx)) } if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -344,7 +345,7 @@ func PostLocalGroups(ctx context.Context, db graph.Database, localGroupExpansion } } - log.Infof(fmt.Sprintf("Finished post-processing %d active directory computers", computers.GetCardinality())) + slog.InfoContext(ctx, fmt.Sprintf("Finished post-processing %d active directory computers", computers.GetCardinality())) return &operation.Stats, 
operation.Done() } } @@ -483,7 +484,7 @@ func FetchLocalGroupBitmapForComputer(tx graph.Transaction, computer graph.ID, s } func ExpandAllRDPLocalGroups(ctx context.Context, db graph.Database) (impact.PathAggregator, error) { - log.Infof(fmt.Sprintf("Expanding all AD group and local group memberships")) + slog.InfoContext(ctx, fmt.Sprintf("Expanding all AD group and local group memberships")) return ResolveAllGroupMemberships(ctx, db, query.Not( query.Or( diff --git a/packages/go/analysis/ad/queries.go b/packages/go/analysis/ad/queries.go index 3e1a37f28a..9afc3b5bd1 100644 --- a/packages/go/analysis/ad/queries.go +++ b/packages/go/analysis/ad/queries.go @@ -33,7 +33,6 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" ) @@ -1516,7 +1515,7 @@ func FetchUserSessionCompleteness(tx graph.Transaction, domainSIDs ...string) (f func FetchAllGroupMembers(ctx context.Context, db graph.Database, targets graph.NodeSet) (graph.NodeSet, error) { defer measure.ContextMeasure(ctx, slog.LevelInfo, "FetchAllGroupMembers")() - log.Infof(fmt.Sprintf("Fetching group members for %d AD nodes", len(targets))) + slog.InfoContext(ctx, fmt.Sprintf("Fetching group members for %d AD nodes", len(targets))) allGroupMembers := graph.NewNodeSet() @@ -1530,7 +1529,7 @@ func FetchAllGroupMembers(ctx context.Context, db graph.Database, targets graph. 
} } - log.Infof(fmt.Sprintf("Collected %d group members", len(allGroupMembers))) + slog.InfoContext(ctx, fmt.Sprintf("Collected %d group members", len(allGroupMembers))) return allGroupMembers, nil } diff --git a/packages/go/cypher/models/pgsql/translate/expression.go b/packages/go/cypher/models/pgsql/translate/expression.go index b62c54443d..a76020933d 100644 --- a/packages/go/cypher/models/pgsql/translate/expression.go +++ b/packages/go/cypher/models/pgsql/translate/expression.go @@ -18,8 +18,7 @@ package translate import ( "fmt" - - "github.com/specterops/bloodhound/log" + "log/slog" "github.com/specterops/bloodhound/cypher/models/pgsql" "github.com/specterops/bloodhound/cypher/models/walk" @@ -256,7 +255,7 @@ func InferExpressionType(expression pgsql.Expression) (pgsql.DataType, error) { return InferExpressionType(typedExpression.Expression) default: - log.Infof(fmt.Sprintf("unable to infer type hint for expression type: %T", expression)) + slog.Info(fmt.Sprintf("unable to infer type hint for expression type: %T", expression)) return pgsql.UnknownDataType, nil } } diff --git a/packages/go/dawgs/drivers/neo4j/index.go b/packages/go/dawgs/drivers/neo4j/index.go index 6588034c81..8a416d04ab 100644 --- a/packages/go/dawgs/drivers/neo4j/index.go +++ b/packages/go/dawgs/drivers/neo4j/index.go @@ -19,10 +19,10 @@ package neo4j import ( "context" "fmt" + "log/slog" "strings" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" ) const ( @@ -122,7 +122,7 @@ func indexTypeProvider(indexType graph.IndexType) string { func assertIndexes(ctx context.Context, db graph.Database, indexesToRemove []string, indexesToAdd map[string]neo4jIndex) error { if err := db.WriteTransaction(ctx, func(tx graph.Transaction) error { for _, indexToRemove := range indexesToRemove { - log.Infof(fmt.Sprintf("Removing index %s", indexToRemove)) + slog.InfoContext(ctx, fmt.Sprintf("Removing index %s", indexToRemove)) result := 
tx.Raw(strings.Replace(dropPropertyIndexStatement, "$name", indexToRemove, 1), nil) result.Close() @@ -139,7 +139,7 @@ func assertIndexes(ctx context.Context, db graph.Database, indexesToRemove []str return db.WriteTransaction(ctx, func(tx graph.Transaction) error { for indexName, indexToAdd := range indexesToAdd { - log.Infof(fmt.Sprintf("Adding index %s to labels %s on properties %s using %s", indexName, indexToAdd.kind.String(), indexToAdd.Field, indexTypeProvider(indexToAdd.Type))) + slog.InfoContext(ctx, fmt.Sprintf("Adding index %s to labels %s on properties %s using %s", indexName, indexToAdd.kind.String(), indexToAdd.Field, indexTypeProvider(indexToAdd.Type))) if err := db.Run(ctx, createPropertyIndexStatement, map[string]interface{}{ "name": indexName, diff --git a/packages/go/dawgs/drivers/neo4j/transaction.go b/packages/go/dawgs/drivers/neo4j/transaction.go index 83d863eed3..aaa85fa090 100644 --- a/packages/go/dawgs/drivers/neo4j/transaction.go +++ b/packages/go/dawgs/drivers/neo4j/transaction.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "sort" "strings" @@ -336,7 +337,7 @@ func (s *neo4jTransaction) Raw(stmt string, params map[string]any) graph.Result } } - log.Info().Str("dawgs_db_driver", DriverName).Msgf("%s - %s", stmt, prettyParameters.String()) + slog.Info(fmt.Sprintf("%s - %s", stmt, prettyParameters.String()), "dawgs_db_driver", DriverName) } driverResult, err := s.currentTx().Run(stmt, params) diff --git a/packages/go/dawgs/drivers/pg/batch.go b/packages/go/dawgs/drivers/pg/batch.go index 3d6f3bdcde..32af7dfcca 100644 --- a/packages/go/dawgs/drivers/pg/batch.go +++ b/packages/go/dawgs/drivers/pg/batch.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "fmt" + "log/slog" "strconv" "strings" @@ -29,7 +30,6 @@ import ( "github.com/specterops/bloodhound/dawgs/drivers/pg/model" sql "github.com/specterops/bloodhound/dawgs/drivers/pg/query" "github.com/specterops/bloodhound/dawgs/graph" - 
"github.com/specterops/bloodhound/log" ) type Int2ArrayEncoder struct { @@ -502,7 +502,7 @@ func (s *batch) flushRelationshipCreateBuffer() error { } else if graphTarget, err := s.innerTransaction.getTargetGraph(); err != nil { return err } else if _, err := s.innerTransaction.tx.Exec(s.ctx, createEdgeBatchStatement, graphTarget.ID, createBatch.startIDs, createBatch.endIDs, createBatch.edgeKindIDs, createBatch.edgePropertyBags); err != nil { - log.Infof(fmt.Sprintf("Num merged property bags: %d - Num edge keys: %d - StartID batch size: %d", len(batchBuilder.edgePropertiesIndex), len(batchBuilder.keyToEdgeID), len(batchBuilder.relationshipUpdateBatch.startIDs))) + slog.Info(fmt.Sprintf("Num merged property bags: %d - Num edge keys: %d - StartID batch size: %d", len(batchBuilder.edgePropertiesIndex), len(batchBuilder.keyToEdgeID), len(batchBuilder.relationshipUpdateBatch.startIDs))) return err } diff --git a/packages/go/dawgs/drivers/pg/tooling.go b/packages/go/dawgs/drivers/pg/tooling.go index aa4728982e..32f9fdb65c 100644 --- a/packages/go/dawgs/drivers/pg/tooling.go +++ b/packages/go/dawgs/drivers/pg/tooling.go @@ -17,12 +17,11 @@ package pg import ( - "fmt" + "log/slog" "regexp" "sync" "github.com/specterops/bloodhound/dawgs/drivers" - "github.com/specterops/bloodhound/log" ) type IterationOptions interface { @@ -54,7 +53,7 @@ type queryHook struct { func (s *queryHook) Execute(query string, arguments ...any) { switch s.action { case actionTrace: - log.Infof(fmt.Sprintf("Here")) + slog.Info("Here") } } diff --git a/packages/go/log/cmd/logtest/main.go b/packages/go/log/cmd/logtest/main.go index fd5f84cfcc..7f072cd3a6 100644 --- a/packages/go/log/cmd/logtest/main.go +++ b/packages/go/log/cmd/logtest/main.go @@ -18,14 +18,11 @@ package main import ( "fmt" - - "github.com/specterops/bloodhound/log" + "log/slog" ) func main() { - log.Infof(fmt.Sprintf("This is an info log message: %s", "test")) - log.Warnf(fmt.Sprintf("This is a warning log message: %s", "test")) - 
log.Errorf(fmt.Sprintf("This is a error log message: %s", "test")) - log.Fatalf(fmt.Sprintf("This is a fatal log message and will kill the application with exit 1: %s", "test")) - log.Errorf(fmt.Sprintf("This should never be seen, the Fatalf call is broken!")) + slog.Info(fmt.Sprintf("This is an info log message: %s", "test")) + slog.Warn(fmt.Sprintf("This is a warning log message: %s", "test")) + slog.Error(fmt.Sprintf("This is a error log message: %s", "test")) } diff --git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go index f37bcaa3c9..a3e962cacf 100644 --- a/packages/go/log/handlers/handlers.go +++ b/packages/go/log/handlers/handlers.go @@ -20,6 +20,7 @@ import ( "context" "log/slog" "os" + "runtime" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -76,3 +77,33 @@ func SetGlobalLevel(level slog.Level) { func GlobalLevel() slog.Level { return lvl.Level() } + +type stackFrame struct { + File string `json:"file"` + Line int `json:"line"` + Func string `json:"func"` +} + +func GetSlogCallStack() slog.Attr { + var outputFrames []stackFrame + + pc := make([]uintptr, 25) + n := runtime.Callers(1, pc) + if n == 0 { + return slog.Attr{} + } + pc = pc[:n] // pass only valid pcs to runtime.CallersFrames + frames := runtime.CallersFrames(pc) + + for { + frame, more := frames.Next() + + outputFrames = append(outputFrames, stackFrame{File: frame.File, Line: frame.Line, Func: frame.Function}) + + if !more { + break + } + } + + return slog.Any("stack", outputFrames) +} diff --git a/packages/go/log/log.go b/packages/go/log/log.go index fb334d5de0..bfbd89488f 100644 --- a/packages/go/log/log.go +++ b/packages/go/log/log.go @@ -18,13 +18,10 @@ package log import ( "fmt" - "os" - "strings" - "sync/atomic" - "time" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "os" + "strings" ) // Level is a type alias that represents a log verbosity level. 
@@ -213,41 +210,3 @@ func Trace() Event { func Tracef(format string, args ...any) { Trace().Msgf(format, args...) } - -// Measure is a convenience function that returns a deferrable function that will add a runtime duration to the log -// event. The time measurement begins on instantiation of the returned deferrable function and ends upon call of said -// function. -func Measure(level Level, format string, args ...any) func() { - then := time.Now() - - return func() { - if elapsed := time.Since(then); elapsed >= measureThreshold { - WithLevel(level).Duration(FieldElapsed, elapsed).Msgf(format, args...) - } - } -} - -var ( - logMeasurePairCounter = atomic.Uint64{} - measureThreshold = time.Second -) - -func SetMeasureThreshold(newMeasureThreshold time.Duration) { - measureThreshold = newMeasureThreshold -} - -func LogAndMeasure(level Level, format string, args ...any) func() { - var ( - pairID = logMeasurePairCounter.Add(1) - message = fmt.Sprintf(format, args...) - then = time.Now() - ) - - WithLevel(level).Uint64(FieldMeasurementID, pairID).Msg(message) - - return func() { - if elapsed := time.Since(then); elapsed >= measureThreshold { - WithLevel(level).Duration(FieldElapsed, elapsed).Uint64(FieldMeasurementID, pairID).Msg(message) - } - } -} diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go index 761b5c0260..384219dea7 100644 --- a/packages/go/schemagen/main.go +++ b/packages/go/schemagen/main.go @@ -18,6 +18,7 @@ package main import ( "fmt" + "log/slog" "os" "path/filepath" @@ -76,7 +77,7 @@ func main() { if projectRoot, err := generator.FindGolangWorkspaceRoot(); err != nil { log.Fatalf(fmt.Sprintf("Error finding project root: %v", err)) } else { - log.Infof(fmt.Sprintf("Project root is %s", projectRoot)) + slog.Info(fmt.Sprintf("Project root is %s", projectRoot)) if err := cfgBuilder.OverlayPath(filepath.Join(projectRoot, "packages/cue")); err != nil { log.Fatalf(fmt.Sprintf("Error: %v", err)) diff --git 
a/packages/go/stbernard/analyzers/js/js.go b/packages/go/stbernard/analyzers/js/js.go index ff565cdd08..2a6841df96 100644 --- a/packages/go/stbernard/analyzers/js/js.go +++ b/packages/go/stbernard/analyzers/js/js.go @@ -21,9 +21,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os/exec" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/analyzers/codeclimate" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" "github.com/specterops/bloodhound/packages/go/stbernard/environment" @@ -51,7 +51,7 @@ func Run(jsPaths []string, env environment.Environment) ([]codeclimate.Entry, er result = make([]codeclimate.Entry, 0, len(jsPaths)) ) - log.Infof(fmt.Sprintf("Running eslint")) + slog.Info("Running eslint") for _, path := range jsPaths { entries, err := runEslint(path, env) @@ -63,7 +63,7 @@ func Run(jsPaths []string, env environment.Environment) ([]codeclimate.Entry, er result = append(result, entries...) } - log.Infof(fmt.Sprintf("Completed eslint")) + slog.Info("Completed eslint") return result, exitError } diff --git a/packages/go/stbernard/cmdrunner/cmdrunner.go b/packages/go/stbernard/cmdrunner/cmdrunner.go index 975c6a8336..999fffb78d 100644 --- a/packages/go/stbernard/cmdrunner/cmdrunner.go +++ b/packages/go/stbernard/cmdrunner/cmdrunner.go @@ -19,6 +19,7 @@ package cmdrunner import ( "errors" "fmt" + "log/slog" "os" "os/exec" "strings" @@ -64,7 +65,7 @@ func Run(command string, args []string, path string, env environment.Environment } } - log.Infof(fmt.Sprintf("Running %s for %s", cmdstr, path)) + slog.Info(fmt.Sprintf("Running %s for %s", cmdstr, path)) err := cmd.Run() if _, ok := err.(*exec.ExitError); ok { @@ -73,7 +74,7 @@ func Run(command string, args []string, path string, env environment.Environment return fmt.Errorf("%s: %w", cmdstr, err) } - log.Infof(fmt.Sprintf("Finished %s for %s", cmdstr, path)) + slog.Info(fmt.Sprintf("Finished %s for %s", cmdstr, path)) return exitErr } diff 
--git a/packages/go/stbernard/command/tester/tester.go b/packages/go/stbernard/command/tester/tester.go index 0f3d3d553e..05c758fd81 100644 --- a/packages/go/stbernard/command/tester/tester.go +++ b/packages/go/stbernard/command/tester/tester.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "io/fs" + "log/slog" "os" "path/filepath" @@ -106,7 +107,7 @@ func (s *command) runTests(cwd string, coverPath string, modPaths []string) erro } if !s.yarnOnly { - log.Infof(fmt.Sprintf("Checking coverage directory")) + slog.Info(fmt.Sprintf("Checking coverage directory")) if err := os.MkdirAll(coverPath, os.ModeDir+fs.ModePerm); err != nil { return fmt.Errorf("making coverage directory: %w", err) } else if dirList, err := os.ReadDir(coverPath); err != nil { diff --git a/packages/go/stbernard/environment/environment.go b/packages/go/stbernard/environment/environment.go index d77ba753d5..d01f4bf1d3 100644 --- a/packages/go/stbernard/environment/environment.go +++ b/packages/go/stbernard/environment/environment.go @@ -18,10 +18,9 @@ package environment import ( "fmt" + "log/slog" "os" "strings" - - "github.com/specterops/bloodhound/log" ) const ( @@ -56,7 +55,7 @@ func (s Environment) SetIfEmpty(key string, value string) { // Overrides an environment variable with a new value func (s Environment) Override(key string, value string) { - log.Infof(fmt.Sprintf("Overriding environment variable %s with %s", key, value)) + slog.Info(fmt.Sprintf("Overriding environment variable %s with %s", key, value)) s[key] = value } diff --git a/packages/go/stbernard/git/git.go b/packages/go/stbernard/git/git.go index bf5bd6348b..4264dfccb8 100644 --- a/packages/go/stbernard/git/git.go +++ b/packages/go/stbernard/git/git.go @@ -20,6 +20,7 @@ import ( "bytes" "errors" "fmt" + "log/slog" "os" "os/exec" "path/filepath" @@ -80,7 +81,7 @@ func CheckClean(cwd string, env environment.Environment) (bool, error) { cmd.Stderr = os.Stderr } - log.Infof(fmt.Sprintf("Checking repository clean for %s", cwd)) + 
slog.Info(fmt.Sprintf("Checking repository clean for %s", cwd)) // We need to run git status first to ensure we don't hit a cache issue if err := cmdrunner.Run("git", []string{"status"}, cwd, env, func(c *exec.Cmd) { c.Stdout = nil }); err != nil { @@ -93,7 +94,7 @@ func CheckClean(cwd string, env environment.Environment) (bool, error) { } } - log.Infof(fmt.Sprintf("Finished checking repository clean for %s", cwd)) + slog.Info(fmt.Sprintf("Finished checking repository clean for %s", cwd)) return true, nil } @@ -171,13 +172,13 @@ func getAllVersionTags(cwd string, env environment.Environment) ([]string, error cmd.Stderr = os.Stderr } - log.Infof(fmt.Sprintf("Listing tags for %v", cwd)) + slog.Info(fmt.Sprintf("Listing tags for %v", cwd)) if err := cmd.Run(); err != nil { return nil, fmt.Errorf("git tag --list v*: %w", err) } - log.Infof(fmt.Sprintf("Finished listing tags for %v", cwd)) + slog.Info(fmt.Sprintf("Finished listing tags for %v", cwd)) return strings.Split(output.String(), "\n"), nil } diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index df688d13fd..719b2b9473 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -21,6 +21,7 @@ package main import ( "errors" "fmt" + "log/slog" "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/command" @@ -53,6 +54,6 @@ func main() { } else if err := cmd.Run(); err != nil { log.Fatalf(fmt.Sprintf("Failed to run command `%s`: %v", cmd.Name(), err)) } else { - log.Infof(fmt.Sprintf("Command `%s` completed successfully", cmd.Name())) + slog.Info(fmt.Sprintf("Command `%s` completed successfully", cmd.Name())) } } diff --git a/packages/go/stbernard/workspace/golang/build.go b/packages/go/stbernard/workspace/golang/build.go index 6e7fb09f53..6e44115f1b 100644 --- a/packages/go/stbernard/workspace/golang/build.go +++ b/packages/go/stbernard/workspace/golang/build.go @@ -19,6 +19,7 @@ package golang import ( "errors" "fmt" + 
"log/slog" "path/filepath" "strings" "sync" @@ -50,7 +51,7 @@ func BuildMainPackages(workRoot string, modPaths []string, env environment.Envir version = *parsedVersion } - log.Infof(fmt.Sprintf("Building for version %s", version.Original())) + slog.Info(fmt.Sprintf("Building for version %s", version.Original())) for _, modPath := range modPaths { wg.Add(1) @@ -104,7 +105,7 @@ func buildModuleMainPackages(buildDir string, modPath string, version semver.Ver mu.Unlock() } - log.Infof(fmt.Sprintf("Built package %s", p.Import)) + slog.Info(fmt.Sprintf("Built package %s", p.Import)) }(pkg) } } From d296c1af5079ad99815eb3cf8ae0a27a670a760b Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 11:57:38 -0500 Subject: [PATCH 10/20] BED-4153: Migrate log.Error --- cmd/api/src/analysis/ad/queries.go | 4 +- cmd/api/src/analysis/azure/queries.go | 4 +- cmd/api/src/api/auth.go | 6 +- cmd/api/src/api/error.go | 6 +- cmd/api/src/api/marshalling.go | 17 +-- cmd/api/src/api/middleware/logging.go | 4 +- cmd/api/src/api/middleware/middleware.go | 3 +- cmd/api/src/api/static/static.go | 7 +- cmd/api/src/api/tools/pg.go | 13 +- cmd/api/src/api/v2/auth/auth.go | 14 +-- cmd/api/src/api/v2/auth/login.go | 4 +- cmd/api/src/api/v2/auth/oidc.go | 14 +-- cmd/api/src/api/v2/auth/saml.go | 30 ++--- cmd/api/src/api/v2/collectors.go | 12 +- cmd/api/src/api/v2/cypherquery.go | 4 +- cmd/api/src/api/v2/database_wipe.go | 9 +- cmd/api/src/api/v2/integration/api.go | 4 +- cmd/api/src/auth/model.go | 6 +- cmd/api/src/config/config.go | 6 +- cmd/api/src/daemons/api/bhapi/api.go | 5 +- cmd/api/src/daemons/api/toolapi/api.go | 5 +- cmd/api/src/daemons/daemon.go | 4 +- cmd/api/src/daemons/datapipe/agi.go | 11 +- cmd/api/src/daemons/datapipe/analysis.go | 4 +- .../src/daemons/datapipe/azure_convertors.go | 111 +++++++++--------- cmd/api/src/daemons/datapipe/cleanup.go | 4 +- cmd/api/src/daemons/datapipe/datapipe.go | 29 +++-- 
cmd/api/src/daemons/datapipe/decoders.go | 10 +- cmd/api/src/daemons/datapipe/ingest.go | 16 +-- cmd/api/src/daemons/datapipe/jobs.go | 41 +++---- cmd/api/src/database/analysisrequest.go | 5 +- cmd/api/src/database/db.go | 8 +- cmd/api/src/migrations/manifest.go | 7 +- cmd/api/src/model/audit.go | 4 +- cmd/api/src/queries/graph.go | 8 +- cmd/api/src/services/agi/agi.go | 3 +- .../src/services/fileupload/file_upload.go | 16 +-- packages/go/analysis/ad/ad.go | 15 ++- packages/go/analysis/ad/adcs.go | 23 ++-- packages/go/analysis/ad/adcscache.go | 18 +-- packages/go/analysis/ad/esc1.go | 7 +- packages/go/analysis/ad/esc13.go | 5 +- packages/go/analysis/ad/esc3.go | 41 +++---- packages/go/analysis/ad/esc_shared.go | 3 +- packages/go/analysis/azure/application.go | 6 +- packages/go/analysis/azure/post.go | 19 +-- packages/go/analysis/azure/queries.go | 3 +- .../go/analysis/azure/service_principal.go | 3 +- packages/go/dawgs/drivers/neo4j/cypher.go | 6 +- .../go/dawgs/drivers/neo4j/transaction.go | 4 +- packages/go/ein/ad.go | 11 +- packages/go/ein/azure.go | 15 +-- packages/go/log/handlers/handlers.go | 4 +- packages/go/stbernard/main.go | 2 +- 54 files changed, 323 insertions(+), 320 deletions(-) diff --git a/cmd/api/src/analysis/ad/queries.go b/cmd/api/src/analysis/ad/queries.go index 3686e567e1..2665d940b0 100644 --- a/cmd/api/src/analysis/ad/queries.go +++ b/cmd/api/src/analysis/ad/queries.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/gofrs/uuid" @@ -29,7 +30,6 @@ import ( "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database/types/nan" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/queries" @@ -87,7 +87,7 @@ func GraphStats(ctx context.Context, db graph.Database) (model.ADDataQualityStat } else { for _, 
domain := range domains { if domainSID, err := domain.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Domain node %d does not have a valid %s property: %v", domain.ID, common.ObjectID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Domain node %d does not have a valid %s property: %v", domain.ID, common.ObjectID, err)) } else { aggregation.Domains++ diff --git a/cmd/api/src/analysis/azure/queries.go b/cmd/api/src/analysis/azure/queries.go index 309a574409..9be76367a6 100644 --- a/cmd/api/src/analysis/azure/queries.go +++ b/cmd/api/src/analysis/azure/queries.go @@ -19,6 +19,7 @@ package azure import ( "context" "fmt" + "log/slog" "sync" "github.com/gofrs/uuid" @@ -28,7 +29,6 @@ import ( "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/model" ) @@ -55,7 +55,7 @@ func GraphStats(ctx context.Context, db graph.Database) (model.AzureDataQualityS } else { for _, tenant := range tenants { if tenantObjectID, err := tenant.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) } else { aggregation.Tenants++ diff --git a/cmd/api/src/api/auth.go b/cmd/api/src/api/auth.go index b950a61906..c06f5a4a5a 100644 --- a/cmd/api/src/api/auth.go +++ b/cmd/api/src/api/auth.go @@ -141,7 +141,7 @@ func (s authenticator) LoginWithSecret(ctx context.Context, loginRequest LoginRe auditLogFields := types.JSONUntypedObject{"username": loginRequest.Username, "auth_type": auth.ProviderTypeSecret} if commitID, err := uuid.NewV4(); err != nil { - log.Errorf(fmt.Sprintf("Error generating commit ID for login: %s", err)) + 
slog.ErrorContext(ctx, fmt.Sprintf("Error generating commit ID for login: %s", err)) return LoginDetails{}, err } else { s.auditLogin(ctx, commitID, model.AuditLogStatusIntent, model.User{}, auditLogFields) @@ -282,7 +282,7 @@ func (s authenticator) ValidateRequestSignature(tokenID uuid.UUID, request *http authToken.LastAccess = time.Now().UTC() if err := s.db.UpdateAuthToken(request.Context(), authToken); err != nil { - log.Errorf(fmt.Sprintf("Error updating last access on AuthToken: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error updating last access on AuthToken: %v", err)) } if sdtf, ok := readCloser.(*SelfDestructingTempFile); ok { @@ -363,7 +363,7 @@ func (s authenticator) CreateSSOSession(request *http.Request, response http.Res // Generate commit ID for audit logging if commitID, err = uuid.NewV4(); err != nil { - log.Errorf(fmt.Sprintf("Error generating commit ID for login: %s", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error generating commit ID for login: %s", err)) WriteErrorResponse(requestCtx, BuildErrorResponse(http.StatusInternalServerError, "audit log creation failure", request), response) return } diff --git a/cmd/api/src/api/error.go b/cmd/api/src/api/error.go index cb61ebe438..3e7ea9dcaa 100644 --- a/cmd/api/src/api/error.go +++ b/cmd/api/src/api/error.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "strings" "time" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/ctx" "github.com/specterops/bloodhound/src/database" ) @@ -129,7 +129,7 @@ func HandleDatabaseError(request *http.Request, response http.ResponseWriter, er } else if errors.Is(err, context.DeadlineExceeded) { WriteErrorResponse(request.Context(), BuildErrorResponse(http.StatusInternalServerError, ErrorResponseRequestTimeout, request), response) } else { - log.Errorf(fmt.Sprintf("Unexpected database error: %v", err)) + slog.Error(fmt.Sprintf("Unexpected database error: %v", err)) 
WriteErrorResponse(request.Context(), BuildErrorResponse(http.StatusInternalServerError, ErrorResponseDetailsInternalServerError, request), response) } } @@ -140,7 +140,7 @@ func FormatDatabaseError(err error) error { if errors.Is(err, database.ErrNotFound) { return errors.New(ErrorResponseDetailsResourceNotFound) } else { - log.Errorf(fmt.Sprintf("Unexpected database error: %v", err)) + slog.Error(fmt.Sprintf("Unexpected database error: %v", err)) return errors.New(ErrorResponseDetailsInternalServerError) } } diff --git a/cmd/api/src/api/marshalling.go b/cmd/api/src/api/marshalling.go index dd5cb40471..40d782c560 100644 --- a/cmd/api/src/api/marshalling.go +++ b/cmd/api/src/api/marshalling.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "time" @@ -94,7 +95,7 @@ func WriteErrorResponse(ctx context.Context, untypedError any, response http.Res func WriteBasicResponse(ctx context.Context, inputData any, statusCode int, response http.ResponseWriter) { if data, err := ToJSONRawMessage(inputData); err != nil { - log.Errorf(fmt.Sprintf("Failed marshaling data for basic response: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed marshaling data for basic response: %v", err)) response.WriteHeader(http.StatusInternalServerError) } else { WriteJSONResponse(ctx, BasicResponse{ @@ -158,35 +159,35 @@ func WriteResponseWrapperWithTimeWindowAndPagination(ctx context.Context, data a WriteJSONResponse(ctx, wrapper, statusCode, response) } -func WriteJSONResponse(_ context.Context, message any, statusCode int, response http.ResponseWriter) { +func WriteJSONResponse(ctx context.Context, message any, statusCode int, response http.ResponseWriter) { response.Header().Set(headers.ContentType.String(), mediatypes.ApplicationJson.String()) if content, err := json.Marshal(message); err != nil { - log.Errorf(fmt.Sprintf("Failed to marshal value into JSON for request: %v: for message: %+v", err, message)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to marshal 
value into JSON for request: %v: for message: %+v", err, message)) response.WriteHeader(http.StatusInternalServerError) } else { response.WriteHeader(statusCode) if written, err := response.Write(content); err != nil { - log.Errorf(fmt.Sprintf("Writing API Error. Failed to write JSON response with %d bytes written and error: %v", written, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Writing API Error. Failed to write JSON response with %d bytes written and error: %v", written, err)) } } } -func WriteCSVResponse(_ context.Context, message model.CSVWriter, statusCode int, response http.ResponseWriter) { +func WriteCSVResponse(ctx context.Context, message model.CSVWriter, statusCode int, response http.ResponseWriter) { response.Header().Set(headers.ContentType.String(), mediatypes.TextCsv.String()) response.WriteHeader(statusCode) if err := message.WriteCSV(response); err != nil { - log.Errorf(fmt.Sprintf("Writing API Error. Failed to write CSV for request: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Writing API Error. Failed to write CSV for request: %v", err)) } } -func WriteBinaryResponse(_ context.Context, data []byte, filename string, statusCode int, response http.ResponseWriter) { +func WriteBinaryResponse(ctx context.Context, data []byte, filename string, statusCode int, response http.ResponseWriter) { response.Header().Set(headers.ContentType.String(), mediatypes.ApplicationOctetStream.String()) response.Header().Set(headers.ContentDisposition.String(), fmt.Sprintf(utils.ContentDispositionAttachmentTemplate, filename)) response.WriteHeader(statusCode) if written, err := response.Write(data); err != nil { - log.Errorf(fmt.Sprintf("Writing API Error. Failed to write binary response with %d bytes written and error: %v", written, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Writing API Error. 
Failed to write binary response with %d bytes written and error: %v", written, err)) } } diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index 10996a3d1e..19d4116db8 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -38,7 +38,7 @@ func PanicHandler(next http.Handler) http.Handler { return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) { defer func() { if recovery := recover(); recovery != nil { - log.Errorf(fmt.Sprintf("[panic recovery] %s - [stack trace] %s", recovery, debug.Stack())) + slog.ErrorContext(request.Context(), fmt.Sprintf("[panic recovery] %s - [stack trace] %s", recovery, debug.Stack())) } }() @@ -136,7 +136,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http // assign a deadline, but only if a valid timeout has been supplied via the prefer header timeout, err := RequestWaitDuration(request) if err != nil { - log.Errorf(fmt.Sprintf("Error parsing prefer header for timeout: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error parsing prefer header for timeout: %v", err)) } else if err == nil && timeout > 0 { deadline = time.Now().Add(timeout * time.Second) } diff --git a/cmd/api/src/api/middleware/middleware.go b/cmd/api/src/api/middleware/middleware.go index a60c7f38de..a22b744b38 100644 --- a/cmd/api/src/api/middleware/middleware.go +++ b/cmd/api/src/api/middleware/middleware.go @@ -19,6 +19,7 @@ package middleware import ( "context" "fmt" + "log/slog" "net" "net/http" "net/url" @@ -103,7 +104,7 @@ func ContextMiddleware(next http.Handler) http.Handler { ) if newUUID, err := uuid.NewV4(); err != nil { - log.Errorf(fmt.Sprintf("Failed generating a new request UUID: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Failed generating a new request UUID: %v", err)) requestID = "ERROR" } else { requestID = newUUID.String() diff --git a/cmd/api/src/api/static/static.go 
b/cmd/api/src/api/static/static.go index 17fd58a01b..89f87515ba 100644 --- a/cmd/api/src/api/static/static.go +++ b/cmd/api/src/api/static/static.go @@ -20,16 +20,15 @@ import ( "fmt" "io" "io/fs" + "log/slog" "mime" "net/http" "path/filepath" "strings" - "github.com/specterops/bloodhound/src/utils" - "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" + "github.com/specterops/bloodhound/src/utils" ) type AssetConfig struct { @@ -90,7 +89,7 @@ func serve(cfg AssetConfig, response http.ResponseWriter, request *http.Request) response.Header().Set(headers.StrictTransportSecurity.String(), utils.HSTSSetting) if _, err := io.Copy(response, fin); err != nil { - log.Errorf(fmt.Sprintf("Failed flushing static file content for asset %s to client: %v", assetPath, err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Failed flushing static file content for asset %s to client: %v", assetPath, err)) } } } diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index c2027da65f..370239995d 100644 --- a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -29,7 +29,6 @@ import ( "github.com/specterops/bloodhound/dawgs/drivers/pg" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/util/size" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/config" @@ -237,7 +236,7 @@ func (s *PGMigrator) SwitchPostgreSQL(response http.ResponseWriter, request *htt "error": fmt.Errorf("failed connecting to PostgreSQL: %w", err), }, http.StatusInternalServerError, response) } else if err := pgDB.AssertSchema(request.Context(), s.graphSchema); err != nil { - log.Errorf(fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) } 
else if err := SetGraphDriver(request.Context(), s.cfg, pg.DriverName); err != nil { api.WriteJSONResponse(request.Context(), map[string]any{ "error": fmt.Errorf("failed updating graph database driver preferences: %w", err), @@ -295,19 +294,19 @@ func (s *PGMigrator) startMigration() error { slog.InfoContext(ctx, fmt.Sprintf("Starting live migration from Neo4j to PostgreSQL")) if err := pgDB.AssertSchema(ctx, s.graphSchema); err != nil { - log.Errorf(fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) } else if err := migrateTypes(ctx, neo4jDB, pgDB); err != nil { - log.Errorf(fmt.Sprintf("Unable to migrate Neo4j kinds to PostgreSQL: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Unable to migrate Neo4j kinds to PostgreSQL: %v", err)) } else if nodeIDMappings, err := migrateNodes(ctx, neo4jDB, pgDB); err != nil { - log.Errorf(fmt.Sprintf("Failed importing nodes into PostgreSQL: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed importing nodes into PostgreSQL: %v", err)) } else if err := migrateEdges(ctx, neo4jDB, pgDB, nodeIDMappings); err != nil { - log.Errorf(fmt.Sprintf("Failed importing edges into PostgreSQL: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed importing edges into PostgreSQL: %v", err)) } else { slog.InfoContext(ctx, fmt.Sprintf("Migration to PostgreSQL completed successfully")) } if err := s.advanceState(stateIdle, stateMigrating, stateCanceling); err != nil { - log.Errorf(fmt.Sprintf("Database migration state management error: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Database migration state management error: %v", err)) } }(migrationCtx) } diff --git a/cmd/api/src/api/v2/auth/auth.go b/cmd/api/src/api/v2/auth/auth.go index 5f107b0368..b5ef6b7381 100644 --- a/cmd/api/src/api/v2/auth/auth.go +++ b/cmd/api/src/api/v2/auth/auth.go @@ -19,6 +19,7 @@ package auth import ( "context" "fmt" + "log/slog" "net/http" "slices" 
"strconv" @@ -30,7 +31,6 @@ import ( "github.com/pkg/errors" "github.com/pquerna/otp/totp" "github.com/specterops/bloodhound/crypto" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" v2 "github.com/specterops/bloodhound/src/api/v2" "github.com/specterops/bloodhound/src/auth" @@ -317,7 +317,7 @@ func (s ManagementResource) CreateUser(response http.ResponseWriter, request *ht api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, errs.Error(), request), response) return } else if secretDigest, err := s.secretDigester.Digest(createUserRequest.Secret); err != nil { - log.Errorf(fmt.Sprintf("Error while attempting to digest secret for user: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error while attempting to digest secret for user: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) return } else { @@ -340,7 +340,7 @@ func (s ManagementResource) CreateUser(response http.ResponseWriter, request *ht api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("SAML Provider ID must be a number: %v", err.Error()), request), response) return } else if samlProvider, err := s.db.GetSAMLProvider(request.Context(), samlProviderID); err != nil { - log.Errorf(fmt.Sprintf("Error while attempting to fetch SAML provider %s: %v", createUserRequest.SAMLProviderID, err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error while attempting to fetch SAML provider %s: %v", createUserRequest.SAMLProviderID, err)) api.HandleDatabaseError(request, response, err) return } else { @@ -551,7 +551,7 @@ func (s ManagementResource) PutUserAuthSecret(response http.ResponseWriter, requ passwordExpiration := appcfg.GetPasswordExpiration(request.Context(), s.db) if secretDigest, err := s.secretDigester.Digest(setUserSecretRequest.Secret); err != nil { - 
log.Errorf(fmt.Sprintf("Error while attempting to digest secret for user: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error while attempting to digest secret for user: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { authSecret.UserID = targetUser.ID @@ -749,11 +749,11 @@ func (s ManagementResource) DeleteAuthToken(response http.ResponseWriter, reques if err := s.db.AppendAuditLog(request.Context(), auditLogEntry); err != nil { // We want to keep err scoped because response trumps this error if errors.Is(err, database.ErrNotFound) { - log.Errorf(fmt.Sprintf("resource not found: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("resource not found: %v", err)) } else if errors.Is(err, context.DeadlineExceeded) { - log.Errorf(fmt.Sprintf("context deadline exceeded: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("context deadline exceeded: %v", err)) } else { - log.Errorf(fmt.Sprintf("unexpected database error: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("unexpected database error: %v", err)) } } } diff --git a/cmd/api/src/api/v2/auth/login.go b/cmd/api/src/api/v2/auth/login.go index 505f6c5620..201644fa96 100644 --- a/cmd/api/src/api/v2/auth/login.go +++ b/cmd/api/src/api/v2/auth/login.go @@ -20,10 +20,10 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "strings" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/config" @@ -55,7 +55,7 @@ func (s LoginResource) loginSecret(loginRequest api.LoginRequest, response http. 
} else if errors.Is(err, api.ErrUserDisabled) { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusForbidden, err.Error(), request), response) } else { - log.Errorf(fmt.Sprintf("Error during authentication for request ID %s: %v", ctx.RequestID(request), err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error during authentication for request ID %s: %v", ctx.RequestID(request), err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } } else { diff --git a/cmd/api/src/api/v2/auth/oidc.go b/cmd/api/src/api/v2/auth/oidc.go index afa7ebd72e..176896216a 100644 --- a/cmd/api/src/api/v2/auth/oidc.go +++ b/cmd/api/src/api/v2/auth/oidc.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "log/slog" "mime" "net/http" "net/url" @@ -31,7 +32,6 @@ import ( "github.com/coreos/go-oidc/v3/oidc" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api" v2 "github.com/specterops/bloodhound/src/api/v2" @@ -163,11 +163,11 @@ func (s ManagementResource) OIDCLoginHandler(response http.ResponseWriter, reque // SSO misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if state, err := config.GenerateRandomBase64String(77); err != nil { - log.Errorf(fmt.Sprintf("[OIDC] Failed to generate state: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] Failed to generate state: %v", err)) // Technical issues scenario v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") } else if provider, err := oidc.NewProvider(request.Context(), ssoProvider.OIDCProvider.Issuer); err != nil { - log.Errorf(fmt.Sprintf("[OIDC] Failed to create OIDC provider: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] Failed to create OIDC provider: %v", err)) // SSO misconfiguration or technical issue // Treat this as a misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") @@ -221,20 +221,20 @@ func (s ManagementResource) OIDCCallbackHandler(response http.ResponseWriter, re // Invalid state - treat as technical issue or misconfiguration v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. Please check your internet and try again.") } else if provider, err := oidc.NewProvider(request.Context(), ssoProvider.OIDCProvider.Issuer); err != nil { - log.Errorf(fmt.Sprintf("[OIDC] Failed to create OIDC provider: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] Failed to create OIDC provider: %v", err)) // SSO misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if claims, err := getOIDCClaims(request.Context(), provider, ssoProvider, pkceVerifier, code[0]); err != nil { - log.Errorf(fmt.Sprintf("[OIDC] %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] %v", err)) v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else if email, err := getEmailFromOIDCClaims(claims); errors.Is(err, ErrEmailMissing) { // Note email claims are not always present so we will check different claim keys for possible email - log.Errorf(fmt.Sprintf("[OIDC] Claims did not contain any valid email address")) + slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] Claims did not contain any valid email address")) 
v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else { if ssoProvider.Config.AutoProvision.Enabled { if err := jitOIDCUserCreation(request.Context(), ssoProvider, email, claims, s.db); err != nil { // It is safe to let this request drop into the CreateSSOSession function below to ensure proper audit logging - log.Errorf(fmt.Sprintf("[OIDC] Error during JIT User Creation: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] Error during JIT User Creation: %v", err)) } } diff --git a/cmd/api/src/api/v2/auth/saml.go b/cmd/api/src/api/v2/auth/saml.go index f79c45a800..be71672942 100644 --- a/cmd/api/src/api/v2/auth/saml.go +++ b/cmd/api/src/api/v2/auth/saml.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "log/slog" "mime/multipart" "net/http" "strconv" @@ -32,7 +33,6 @@ import ( "github.com/gorilla/mux" "github.com/specterops/bloodhound/crypto" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api" v2 "github.com/specterops/bloodhound/src/api/v2" @@ -345,12 +345,12 @@ func (s ManagementResource) ServeMetadata(response http.ResponseWriter, request } else { // Note: This is the samlsp metadata tied to authenticate flow and will not be the same as the XML metadata used to import the SAML provider initially if content, err := xml.MarshalIndent(serviceProvider.Metadata(), "", " "); err != nil { - log.Errorf(fmt.Sprintf("[SAML] XML marshalling failure during service provider encoding for %s: %v", ssoProvider.SAMLProvider.IssuerURI, err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] XML marshalling failure during service provider encoding for %s: %v", ssoProvider.SAMLProvider.IssuerURI, err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), 
response) } else { response.Header().Set(headers.ContentType.String(), mediatypes.ApplicationSamlmetadataXml.String()) if _, err := response.Write(content); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed to write response for serving metadata: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to write response for serving metadata: %v", err)) } } } @@ -370,7 +370,7 @@ func (s ManagementResource) ServeSigningCertificate(response http.ResponseWriter // Note this is the public cert not necessarily the IDP cert response.Header().Set(headers.ContentDisposition.String(), fmt.Sprintf("attachment; filename=\"%s-signing-certificate.pem\"", ssoProvider.Slug)) if _, err := response.Write([]byte(crypto.FormatCert(s.config.SAML.ServiceProviderCertificate))); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed to write response for serving signing certificate: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to write response for serving signing certificate: %v", err)) } } } @@ -382,7 +382,7 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if serviceProvider, err := auth.NewServiceProvider(*ctx.Get(request.Context()).Host, s.config, *ssoProvider.SAMLProvider); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Service provider creation failed: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Service provider creation failed: %v", err)) // Technical issues scenario v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") } else { @@ -397,7 +397,7 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque // TODO: add actual relay state support - BED-5071 if authReq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation, binding, saml.HTTPPostBinding); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed creating SAML authentication request: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed creating SAML authentication request: %v", err)) // SAML misconfiguration or technical issue // Since this likely indicates a configuration problem, we treat it as a misconfiguration scenario v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") @@ -405,7 +405,7 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque switch binding { case saml.HTTPRedirectBinding: if redirectURL, err := authReq.Redirect("", &serviceProvider); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed to format a redirect for SAML provider %s: %v", serviceProvider.EntityID, err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to format a redirect for SAML provider %s: %v", serviceProvider.EntityID, err)) // Likely a technical or configuration issue v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else { @@ -419,13 +419,13 @@ func (s ManagementResource) SAMLLoginHandler(response http.ResponseWriter, reque response.WriteHeader(http.StatusOK) if _, err := response.Write([]byte(fmt.Sprintf(authInitiationContentBodyFormat, authReq.Post("")))); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed to write response with HTTP POST binding: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to write response with HTTP POST binding: %v", err)) // Technical issues scenario v2.RedirectToLoginPage(response, request, "We’re having trouble 
connecting. Please check your internet and try again.") } default: - log.Errorf(fmt.Sprintf("[SAML] Unhandled binding type %s", binding)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Unhandled binding type %s", binding)) // Treating unknown binding as a misconfiguration v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } @@ -439,10 +439,10 @@ func (s ManagementResource) SAMLCallbackHandler(response http.ResponseWriter, re // SAML misconfiguration v2.RedirectToLoginPage(response, request, "Your SSO Connection failed, please contact your Administrator") } else if serviceProvider, err := auth.NewServiceProvider(*ctx.Get(request.Context()).Host, s.config, *ssoProvider.SAMLProvider); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Service provider creation failed: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Service provider creation failed: %v", err)) v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. Please check your internet and try again.") } else if err := request.ParseForm(); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed to parse form POST: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to parse form POST: %v", err)) // Technical issues or invalid form data // This is not covered by acceptance criteria directly; treat as technical issue v2.RedirectToLoginPage(response, request, "We’re having trouble connecting. 
Please check your internet and try again.") @@ -450,21 +450,21 @@ func (s ManagementResource) SAMLCallbackHandler(response http.ResponseWriter, re var typedErr *saml.InvalidResponseError switch { case errors.As(err, &typedErr): - log.Errorf(fmt.Sprintf("[SAML] Failed to parse ACS response for provider %s: %v - %s", ssoProvider.SAMLProvider.IssuerURI, typedErr.PrivateErr, typedErr.Response)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to parse ACS response for provider %s: %v - %s", ssoProvider.SAMLProvider.IssuerURI, typedErr.PrivateErr, typedErr.Response)) default: - log.Errorf(fmt.Sprintf("[SAML] Failed to parse ACS response for provider %s: %v", ssoProvider.SAMLProvider.IssuerURI, err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to parse ACS response for provider %s: %v", ssoProvider.SAMLProvider.IssuerURI, err)) } // SAML credentials issue scenario (authentication failed) v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else if principalName, err := ssoProvider.SAMLProvider.GetSAMLUserPrincipalNameFromAssertion(assertion); err != nil { - log.Errorf(fmt.Sprintf("[SAML] Failed to lookup user for SAML provider %s: %v", ssoProvider.Name, err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Failed to lookup user for SAML provider %s: %v", ssoProvider.Name, err)) // SAML credentials issue scenario again v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else { if ssoProvider.Config.AutoProvision.Enabled { if err := jitSAMLUserCreation(request.Context(), ssoProvider, principalName, assertion, s.db); err != nil { // It is safe to let this request drop into the CreateSSOSession function below to ensure proper audit logging - log.Errorf(fmt.Sprintf("[SAML] Error during JIT User Creation: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("[SAML] Error 
during JIT User Creation: %v", err)) } } diff --git a/cmd/api/src/api/v2/collectors.go b/cmd/api/src/api/v2/collectors.go index 8850943aae..b1a522987c 100644 --- a/cmd/api/src/api/v2/collectors.go +++ b/cmd/api/src/api/v2/collectors.go @@ -18,12 +18,12 @@ package v2 import ( "fmt" + "log/slog" "net/http" "os" "path/filepath" "github.com/gorilla/mux" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" ) @@ -64,7 +64,7 @@ func (s *Resources) GetCollectorManifest(response http.ResponseWriter, request * if CollectorType(collectorType).String() == "InvalidCollectorType" { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("Invalid collector type: %s", collectorType), request), response) } else if collectorManifest, ok := s.CollectorManifests[collectorType]; !ok { - log.Errorf(fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { api.WriteBasicResponse(request.Context(), collectorManifest, http.StatusOK, response) @@ -84,7 +84,7 @@ func (s *Resources) DownloadCollectorByVersion(response http.ResponseWriter, req api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("Invalid collector type: %s", collectorType), request), response) } else if releaseTag == "latest" { if collectorManifest, ok := s.CollectorManifests[collectorType]; !ok { - log.Errorf(fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, 
api.ErrorResponseDetailsInternalServerError, request), response) return } else { @@ -95,7 +95,7 @@ func (s *Resources) DownloadCollectorByVersion(response http.ResponseWriter, req } if data, err := os.ReadFile(filepath.Join(s.Config.CollectorsDirectory(), collectorType, fileName)); err != nil { - log.Errorf(fmt.Sprintf("Could not open collector file for download: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Could not open collector file for download: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) } else { api.WriteBinaryResponse(request.Context(), data, fileName, http.StatusOK, response) @@ -115,7 +115,7 @@ func (s *Resources) DownloadCollectorChecksumByVersion(response http.ResponseWri api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusBadRequest, fmt.Sprintf("Invalid collector type: %s", collectorType), request), response) } else if releaseTag == "latest" { if collectorManifest, ok := s.CollectorManifests[collectorType]; !ok { - log.Errorf(fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Manifest doesn't exist for %s collector", collectorType)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, api.ErrorResponseDetailsInternalServerError, request), response) return } else { @@ -126,7 +126,7 @@ func (s *Resources) DownloadCollectorChecksumByVersion(response http.ResponseWri } if data, err := os.ReadFile(filepath.Join(s.Config.CollectorsDirectory(), collectorType, fileName)); err != nil { - log.Errorf(fmt.Sprintf("Could not open collector file for download: %v", err)) + slog.ErrorContext(request.Context(), fmt.Sprintf("Could not open collector file for download: %v", err)) api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusInternalServerError, 
api.ErrorResponseDetailsInternalServerError, request), response) } else { api.WriteBinaryResponse(request.Context(), data, fileName, http.StatusOK, response) diff --git a/cmd/api/src/api/v2/cypherquery.go b/cmd/api/src/api/v2/cypherquery.go index 0091b5c256..ecdac14ae5 100644 --- a/cmd/api/src/api/v2/cypherquery.go +++ b/cmd/api/src/api/v2/cypherquery.go @@ -19,10 +19,10 @@ package v2 import ( "errors" "fmt" + "log/slog" "net/http" "github.com/specterops/bloodhound/dawgs/util" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -108,7 +108,7 @@ func (s Resources) cypherMutation(request *http.Request, preparedQuery queries.P if err := s.DB.AppendAuditLog(request.Context(), auditLogEntry); err != nil { // We want to keep err scoped because having info on the mutation graph response trumps this error - log.Errorf(fmt.Sprintf("failure to create mutation audit log %s", err.Error())) + slog.ErrorContext(request.Context(), fmt.Sprintf("failure to create mutation audit log %s", err.Error())) } return graphResponse, err diff --git a/cmd/api/src/api/v2/database_wipe.go b/cmd/api/src/api/v2/database_wipe.go index 3ae1ddc297..0f21415648 100644 --- a/cmd/api/src/api/v2/database_wipe.go +++ b/cmd/api/src/api/v2/database_wipe.go @@ -19,6 +19,7 @@ package v2 import ( "context" "fmt" + "log/slog" "net/http" "strings" @@ -183,7 +184,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt func (s Resources) deleteHighValueSelectors(ctx context.Context, auditEntry *model.AuditEntry, assetGroupIDs []int) (failure bool) { if err := s.DB.DeleteAssetGroupSelectorsForAssetGroups(ctx, assetGroupIDs); err != nil { - log.Errorf(fmt.Sprintf("%s: %s", "there was an error deleting asset group selectors ", err.Error())) + slog.ErrorContext(ctx, fmt.Sprintf("%s: %s", "there was an error deleting asset group selectors ", err.Error())) 
s.handleAuditLogForDatabaseWipe(ctx, auditEntry, false, "high value selectors") return true } else { @@ -195,7 +196,7 @@ func (s Resources) deleteHighValueSelectors(ctx context.Context, auditEntry *mod func (s Resources) deleteFileIngestHistory(ctx context.Context, auditEntry *model.AuditEntry) (failure bool) { if err := s.DB.DeleteAllFileUploads(ctx); err != nil { - log.Errorf(fmt.Sprintf("%s: %s", "there was an error deleting file ingest history", err.Error())) + slog.ErrorContext(ctx, fmt.Sprintf("%s: %s", "there was an error deleting file ingest history", err.Error())) s.handleAuditLogForDatabaseWipe(ctx, auditEntry, false, "file ingest history") return true } else { @@ -206,7 +207,7 @@ func (s Resources) deleteFileIngestHistory(ctx context.Context, auditEntry *mode func (s Resources) deleteDataQualityHistory(ctx context.Context, auditEntry *model.AuditEntry) (failure bool) { if err := s.DB.DeleteAllDataQuality(ctx); err != nil { - log.Errorf(fmt.Sprintf("%s: %s", "there was an error deleting data quality history", err.Error())) + slog.ErrorContext(ctx, fmt.Sprintf("%s: %s", "there was an error deleting data quality history", err.Error())) s.handleAuditLogForDatabaseWipe(ctx, auditEntry, false, "data quality history") return true } else { @@ -229,6 +230,6 @@ func (s Resources) handleAuditLogForDatabaseWipe(ctx context.Context, auditEntry } if err := s.DB.AppendAuditLog(ctx, *auditEntry); err != nil { - log.Errorf(fmt.Sprintf("%s: %s", "error writing to audit log", err.Error())) + slog.ErrorContext(ctx, fmt.Sprintf("%s: %s", "error writing to audit log", err.Error())) } } diff --git a/cmd/api/src/api/v2/integration/api.go b/cmd/api/src/api/v2/integration/api.go index d1fb70fd2b..989732b277 100644 --- a/cmd/api/src/api/v2/integration/api.go +++ b/cmd/api/src/api/v2/integration/api.go @@ -19,11 +19,11 @@ package integration import ( "context" "fmt" + "log/slog" "net/http" "time" "github.com/specterops/bloodhound/dawgs/graph" - 
"github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" @@ -95,7 +95,7 @@ func (s *Context) EnableAPI() { } if err := initializer.Launch(s.ctx, false); err != nil { - log.Errorf(fmt.Sprintf("Failed launching API server: %v", err)) + slog.Error(fmt.Sprintf("Failed launching API server: %v", err)) } }() } diff --git a/cmd/api/src/auth/model.go b/cmd/api/src/auth/model.go index 5b11e0817d..d3f1d25da8 100644 --- a/cmd/api/src/auth/model.go +++ b/cmd/api/src/auth/model.go @@ -22,13 +22,13 @@ import ( "encoding/base64" "errors" "fmt" + "log/slog" "net/http" "strconv" "time" "github.com/gofrs/uuid" "github.com/golang-jwt/jwt/v4" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database/types/null" "github.com/specterops/bloodhound/src/model" ) @@ -163,10 +163,10 @@ func (s Authorizer) AuditLogUnauthorizedAccess(request *http.Request) { if request.Method != "GET" { data := model.AuditData{"endpoint": request.Method + " " + request.URL.Path} if auditEntry, err := model.NewAuditEntry(model.AuditLogActionUnauthorizedAccessAttempt, model.AuditLogStatusFailure, data); err != nil { - log.Errorf(fmt.Sprintf("Error creating audit log for unauthorized access: %s", err.Error())) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error creating audit log for unauthorized access: %s", err.Error())) return } else if err = s.auditLogger.AppendAuditLog(request.Context(), auditEntry); err != nil { - log.Errorf(fmt.Sprintf("Error creating audit log for unauthorized access: %s", err.Error())) + slog.ErrorContext(request.Context(), fmt.Sprintf("Error creating audit log for unauthorized access: %s", err.Error())) } } } diff --git a/cmd/api/src/config/config.go b/cmd/api/src/config/config.go index cb8b45c6c5..e9d728a62f 100644 --- a/cmd/api/src/config/config.go +++ b/cmd/api/src/config/config.go @@ -253,7 +253,7 @@ func 
SetValuesFromEnv(varPrefix string, target any, env []string) error { } } } else { - log.Errorf(fmt.Sprintf("Invalid key/value pair: %+v", kvParts)) + slog.Error(fmt.Sprintf("Invalid key/value pair: %+v", kvParts)) } } @@ -293,13 +293,13 @@ func (s Configuration) SaveCollectorManifests() (CollectorManifests, error) { manifests := CollectorManifests{} if azureHoundManifest, err := generateCollectorManifest(filepath.Join(s.CollectorsDirectory(), azureHoundCollector)); err != nil { - log.Errorf(fmt.Sprintf("Error generating AzureHound manifest file: %s", err)) + slog.Error(fmt.Sprintf("Error generating AzureHound manifest file: %s", err)) } else { manifests[azureHoundCollector] = azureHoundManifest } if sharpHoundManifest, err := generateCollectorManifest(filepath.Join(s.CollectorsDirectory(), sharpHoundCollector)); err != nil { - log.Errorf(fmt.Sprintf("Error generating SharpHound manifest file: %s", err)) + slog.Error(fmt.Sprintf("Error generating SharpHound manifest file: %s", err)) } else { manifests[sharpHoundCollector] = sharpHoundManifest } diff --git a/cmd/api/src/daemons/api/bhapi/api.go b/cmd/api/src/daemons/api/bhapi/api.go index d71a4314a8..9c299fdd6f 100644 --- a/cmd/api/src/daemons/api/bhapi/api.go +++ b/cmd/api/src/daemons/api/bhapi/api.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "github.com/specterops/bloodhound/log" @@ -54,13 +55,13 @@ func (s Daemon) Start(ctx context.Context) { if s.cfg.TLS.Enabled() { if err := s.server.ListenAndServeTLS(s.cfg.TLS.CertFile, s.cfg.TLS.KeyFile); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("HTTP server listen error: %v", err)) } } } else { if err := s.server.ListenAndServe(); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("HTTP server listen error: %v", err)) } } } 
diff --git a/cmd/api/src/daemons/api/toolapi/api.go b/cmd/api/src/daemons/api/toolapi/api.go index 77917d157c..72de66be0c 100644 --- a/cmd/api/src/daemons/api/toolapi/api.go +++ b/cmd/api/src/daemons/api/toolapi/api.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "net/http/pprof" @@ -115,13 +116,13 @@ func (s Daemon) Start(ctx context.Context) { if s.cfg.TLS.Enabled() { if err := s.server.ListenAndServeTLS(s.cfg.TLS.CertFile, s.cfg.TLS.KeyFile); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("HTTP server listen error: %v", err)) } } } else { if err := s.server.ListenAndServe(); err != nil { if !errors.Is(err, http.ErrServerClosed) { - log.Errorf(fmt.Sprintf("HTTP server listen error: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("HTTP server listen error: %v", err)) } } } diff --git a/cmd/api/src/daemons/daemon.go b/cmd/api/src/daemons/daemon.go index bbae5a1534..6f7595136e 100644 --- a/cmd/api/src/daemons/daemon.go +++ b/cmd/api/src/daemons/daemon.go @@ -22,8 +22,6 @@ import ( "log/slog" "sync" "time" - - "github.com/specterops/bloodhound/log" ) type Daemon interface { @@ -68,7 +66,7 @@ func (s *Manager) Stop() { slog.Info(fmt.Sprintf("Shutting down daemon %s", daemon.Name())) if err := daemon.Stop(shutdownCtx); err != nil { - log.Errorf(fmt.Sprintf("Failure caught while shutting down daemon %s: %v", daemon.Name(), err)) + slog.Error(fmt.Sprintf("Failure caught while shutting down daemon %s: %v", daemon.Name(), err)) } } } diff --git a/cmd/api/src/daemons/datapipe/agi.go b/cmd/api/src/daemons/datapipe/agi.go index 707c1e3171..2a8acbb9a2 100644 --- a/cmd/api/src/daemons/datapipe/agi.go +++ b/cmd/api/src/daemons/datapipe/agi.go @@ -33,7 +33,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - 
"github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/model/appcfg" @@ -77,7 +76,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { // log missing tenant IDs for easier debugging for _, tenant := range tenants { if _, err = tenant.Properties.Get(azure.TenantID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Error getting tenant id for tenant %d: %v", tenant.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting tenant id for tenant %d: %v", tenant.ID, err)) } } @@ -117,7 +116,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { return nil }); err != nil { - log.Errorf(fmt.Sprintf("Failed tagging update: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed tagging update: %v", err)) } }() @@ -130,7 +129,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { for tenant := range tenantC { if roots, err := azureAnalysis.FetchAzureAttackPathRoots(tx, tenant); err != nil { - log.Errorf(fmt.Sprintf("Failed fetching roots for tenant %d: %v", tenant.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed fetching roots for tenant %d: %v", tenant.ID, err)) } else { for _, root := range roots { rootsC <- root.ID @@ -140,7 +139,7 @@ func ParallelTagAzureTierZero(ctx context.Context, db graph.Database) error { return nil }); err != nil { - log.Errorf(fmt.Sprintf("Error reading attack path roots for tenants: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error reading attack path roots for tenants: %v", err)) } }(workerID) } @@ -217,7 +216,7 @@ func RunAssetGroupIsolationCollections(ctx context.Context, db database.Database for idx, node := range assetGroupNodes { if objectID, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Node %d 
that does not have valid %s property", node.ID, common.ObjectID)) + slog.ErrorContext(ctx, fmt.Sprintf("Node %d that does not have valid %s property", node.ID, common.ObjectID)) } else { entries[idx] = model.AssetGroupCollectionEntry{ ObjectID: objectID, diff --git a/cmd/api/src/daemons/datapipe/analysis.go b/cmd/api/src/daemons/datapipe/analysis.go index 9fa8df1274..427a0dcf5a 100644 --- a/cmd/api/src/daemons/datapipe/analysis.go +++ b/cmd/api/src/daemons/datapipe/analysis.go @@ -20,10 +20,10 @@ import ( "context" "errors" "fmt" + "log/slog" adAnalysis "github.com/specterops/bloodhound/analysis/ad" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/analysis/ad" "github.com/specterops/bloodhound/src/analysis/azure" "github.com/specterops/bloodhound/src/config" @@ -103,7 +103,7 @@ func RunAnalysisOperations(ctx context.Context, db database.Database, graphDB gr if len(collectedErrors) > 0 { for _, err := range collectedErrors { - log.Errorf(fmt.Sprintf("Analysis error encountered: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Analysis error encountered: %v", err)) } } diff --git a/cmd/api/src/daemons/datapipe/azure_convertors.go b/cmd/api/src/daemons/datapipe/azure_convertors.go index 5314ee0515..78477315d2 100644 --- a/cmd/api/src/daemons/datapipe/azure_convertors.go +++ b/cmd/api/src/daemons/datapipe/azure_convertors.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" "github.com/bloodhoundad/azurehound/v2/enums" @@ -150,7 +151,7 @@ func getKindConverter(kind enums.Kind) func(json.RawMessage, *ConvertedAzureData func convertAzureApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.App if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf("Error deserializing azure application: %v", err)) + slog.Error(fmt.Sprintf("Error deserializing azure application: %v", err)) } else { converted.NodeProps = 
append(converted.NodeProps, ein.ConvertAZAppToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAZAppRelationships(data)...) @@ -160,7 +161,7 @@ func convertAzureApp(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureVMScaleSet(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VMScaleSet if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine scale set", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine scale set", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAZVMScaleSetToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAZVMScaleSetRelationships(data)...) @@ -171,7 +172,7 @@ func convertAzureVMScaleSetRoleAssignment(raw json.RawMessage, converted *Conver var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine scale set role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine scale set role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVMScaleSetRoleAssignment(data)...) 
} @@ -183,18 +184,18 @@ func convertAzureAppOwner(raw json.RawMessage, converted *ConvertedAzureData) { ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "app owner", err)) + slog.Error(fmt.Sprintf(SerialError, "app owner", err)) } else { for _, raw := range data.Owners { var ( owner azureModels.DirectoryObject ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "app owner", err)) + slog.Error(fmt.Sprintf(SerialError, "app owner", err)) } else if ownerType, err := ein.ExtractTypeFromDirectoryObject(owner); errors.Is(err, ein.ErrInvalidType) { log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(fmt.Sprintf(ExtractError, err)) + slog.Error(fmt.Sprintf(ExtractError, err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureOwnerToRel(owner, ownerType, azure.App, data.AppId)) } @@ -206,7 +207,7 @@ func convertAzureAppRoleAssignment(raw json.RawMessage, converted *ConvertedAzur var data models.AppRoleAssignment if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "app role assignment", err)) + slog.Error(fmt.Sprintf(SerialError, "app role assignment", err)) } else if data.AppId == azure.MSGraphAppUniversalID && data.PrincipalType == PrincipalTypeServicePrincipal { converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureAppRoleAssignmentToNodes(data)...) 
if rel := ein.ConvertAzureAppRoleAssignmentToRel(data); rel.IsValid() { @@ -218,7 +219,7 @@ func convertAzureAppRoleAssignment(raw json.RawMessage, converted *ConvertedAzur func convertAzureDevice(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Device if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure device", err)) + slog.Error(fmt.Sprintf(SerialError, "azure device", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAZDeviceToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAZDeviceRelationships(data)...) @@ -230,18 +231,18 @@ func convertAzureDeviceOwner(raw json.RawMessage, converted *ConvertedAzureData) data models.DeviceOwners ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "device owners", err)) + slog.Error(fmt.Sprintf(SerialError, "device owners", err)) } else { for _, raw := range data.Owners { var ( owner azureModels.DirectoryObject ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "device owner", err)) + slog.Error(fmt.Sprintf(SerialError, "device owner", err)) } else if ownerType, err := ein.ExtractTypeFromDirectoryObject(owner); errors.Is(err, ein.ErrInvalidType) { log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(fmt.Sprintf(ExtractError, err)) + slog.Error(fmt.Sprintf(ExtractError, err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureOwnerToRel(owner, ownerType, azure.Device, data.DeviceId)) } @@ -252,7 +253,7 @@ func convertAzureDeviceOwner(raw json.RawMessage, converted *ConvertedAzureData) func convertAzureFunctionApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.FunctionApp if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure function app", err)) + slog.Error(fmt.Sprintf(SerialError, "azure function app", err)) } else { 
converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureFunctionAppToNode(data)) converted.RelProps = append(converted.RelProps, ein.ConvertAzureFunctionAppToRels(data)...) @@ -263,7 +264,7 @@ func convertAzureFunctionAppRoleAssignment(raw json.RawMessage, converted *Conve var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure function app role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure function app role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureFunctionAppRoleAssignmentToRels(data)...) } @@ -272,7 +273,7 @@ func convertAzureFunctionAppRoleAssignment(raw json.RawMessage, converted *Conve func convertAzureGroup(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Group if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure group", err)) + slog.Error(fmt.Sprintf(SerialError, "azure group", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureGroupToNode(data)) if onPremNode := ein.ConvertAzureGroupToOnPremisesNode(data); onPremNode.IsValid() { @@ -288,7 +289,7 @@ func convertAzureGroupMember(raw json.RawMessage, converted *ConvertedAzureData) ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure group members", err)) + slog.Error(fmt.Sprintf(SerialError, "azure group members", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureGroupMembersToRels(data)...) 
} @@ -299,7 +300,7 @@ func convertAzureGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) data models.GroupOwners ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure group owners", err)) + slog.Error(fmt.Sprintf(SerialError, "azure group owners", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureGroupOwnerToRels(data)...) } @@ -308,7 +309,7 @@ func convertAzureGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) func convertAzureKeyVault(raw json.RawMessage, converted *ConvertedAzureData) { var data models.KeyVault if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure keyvault", err)) + slog.Error(fmt.Sprintf(SerialError, "azure keyvault", err)) } else { node, rel := ein.ConvertAzureKeyVault(data) converted.NodeProps = append(converted.NodeProps, node) @@ -322,7 +323,7 @@ func convertAzureKeyVaultAccessPolicy(raw json.RawMessage, converted *ConvertedA ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure key vault access policy", err)) + slog.Error(fmt.Sprintf(SerialError, "azure key vault access policy", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultAccessPolicy(data)...) } @@ -334,7 +335,7 @@ func convertAzureKeyVaultContributor(raw json.RawMessage, converted *ConvertedAz ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure keyvault contributor", err)) + slog.Error(fmt.Sprintf(SerialError, "azure keyvault contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultContributor(data)...) 
} @@ -346,7 +347,7 @@ func convertAzureKeyVaultKVContributor(raw json.RawMessage, converted *Converted ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure keyvault kvcontributor", err)) + slog.Error(fmt.Sprintf(SerialError, "azure keyvault kvcontributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultKVContributor(data)...) } @@ -358,7 +359,7 @@ func convertAzureKeyVaultOwner(raw json.RawMessage, converted *ConvertedAzureDat ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure keyvault owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure keyvault owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultOwnerToRels(data)...) } @@ -367,7 +368,7 @@ func convertAzureKeyVaultOwner(raw json.RawMessage, converted *ConvertedAzureDat func convertAzureKeyVaultUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.KeyVaultUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure keyvault user access admin", err)) + slog.Error(fmt.Sprintf(SerialError, "azure keyvault user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureKeyVaultUserAccessAdminToRels(data)...) 
} @@ -376,7 +377,7 @@ func convertAzureKeyVaultUserAccessAdmin(raw json.RawMessage, converted *Convert func convertAzureManagementGroupDescendant(raw json.RawMessage, converted *ConvertedAzureData) { var data azureModels.DescendantInfo if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure management group descendant list", err)) + slog.Error(fmt.Sprintf(SerialError, "azure management group descendant list", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagementGroupDescendantToRel(data)) } @@ -385,7 +386,7 @@ func convertAzureManagementGroupDescendant(raw json.RawMessage, converted *Conve func convertAzureManagementGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagementGroupOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure management group owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure management group owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagementGroupOwnerToRels(data)...) } @@ -394,7 +395,7 @@ func convertAzureManagementGroupOwner(raw json.RawMessage, converted *ConvertedA func convertAzureManagementGroupUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagementGroupUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure management group user access admin", err)) + slog.Error(fmt.Sprintf(SerialError, "azure management group user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagementGroupUserAccessAdminToRels(data)...) 
} @@ -403,7 +404,7 @@ func convertAzureManagementGroupUserAccessAdmin(raw json.RawMessage, converted * func convertAzureManagementGroup(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagementGroup if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure management group", err)) + slog.Error(fmt.Sprintf(SerialError, "azure management group", err)) } else { node, rel := ein.ConvertAzureManagementGroup(data) converted.RelProps = append(converted.RelProps, rel) @@ -414,7 +415,7 @@ func convertAzureManagementGroup(raw json.RawMessage, converted *ConvertedAzureD func convertAzureResourceGroup(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ResourceGroup if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure resource group", err)) + slog.Error(fmt.Sprintf(SerialError, "azure resource group", err)) } else { node, rel := ein.ConvertAzureResourceGroup(data) converted.RelProps = append(converted.RelProps, rel) @@ -425,7 +426,7 @@ func convertAzureResourceGroup(raw json.RawMessage, converted *ConvertedAzureDat func convertAzureResourceGroupOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ResourceGroupOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure keyvault", err)) + slog.Error(fmt.Sprintf(SerialError, "azure keyvault", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureResourceGroupOwnerToRels(data)...) 
} @@ -434,7 +435,7 @@ func convertAzureResourceGroupOwner(raw json.RawMessage, converted *ConvertedAzu func convertAzureResourceGroupUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ResourceGroupUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure resource group user access admin", err)) + slog.Error(fmt.Sprintf(SerialError, "azure resource group user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureResourceGroupUserAccessAdminToRels(data)...) } @@ -443,7 +444,7 @@ func convertAzureResourceGroupUserAccessAdmin(raw json.RawMessage, converted *Co func convertAzureRole(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Role if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure role", err)) + slog.Error(fmt.Sprintf(SerialError, "azure role", err)) } else { node, rel := ein.ConvertAzureRole(data) converted.NodeProps = append(converted.NodeProps, node) @@ -454,7 +455,7 @@ func convertAzureRole(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureRoleAssignment(raw json.RawMessage, converted *ConvertedAzureData) { var data models.RoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure role assignment", err)) + slog.Error(fmt.Sprintf(SerialError, "azure role assignment", err)) } else { for _, raw := range data.RoleAssignments { var ( @@ -469,7 +470,7 @@ func convertAzureRoleAssignment(raw json.RawMessage, converted *ConvertedAzureDa func convertAzureServicePrincipal(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ServicePrincipal if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure service principal owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure service principal", err)) } else { nodes, rels :=
ein.ConvertAzureServicePrincipal(data) converted.NodeProps = append(converted.NodeProps, nodes...) @@ -482,7 +483,7 @@ func convertAzureServicePrincipalOwner(raw json.RawMessage, converted *Converted data models.ServicePrincipalOwners ) if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure service principal owners", err)) + slog.Error(fmt.Sprintf(SerialError, "azure service principal owners", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureServicePrincipalOwnerToRels(data)...) } @@ -491,7 +492,7 @@ func convertAzureServicePrincipalOwner(raw json.RawMessage, converted *Converted func convertAzureSubscription(raw json.RawMessage, converted *ConvertedAzureData) { var data azureModels.Subscription if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure subscription", err)) + slog.Error(fmt.Sprintf(SerialError, "azure subscription", err)) } else { node, rel := ein.ConvertAzureSubscription(data) converted.NodeProps = append(converted.NodeProps, node) @@ -502,7 +503,7 @@ func convertAzureSubscription(raw json.RawMessage, converted *ConvertedAzureData func convertAzureSubscriptionOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.SubscriptionOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure subscription owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure subscription owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureSubscriptionOwnerToRels(data)...) 
} @@ -511,7 +512,7 @@ func convertAzureSubscriptionOwner(raw json.RawMessage, converted *ConvertedAzur func convertAzureSubscriptionUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.SubscriptionUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure subscription user access admin", err)) + slog.Error(fmt.Sprintf(SerialError, "azure subscription user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureSubscriptionUserAccessAdminToRels(data)...) } @@ -520,7 +521,7 @@ func convertAzureSubscriptionUserAccessAdmin(raw json.RawMessage, converted *Con func convertAzureTenant(raw json.RawMessage, converted *ConvertedAzureData) { var data models.Tenant if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure tenant", err)) + slog.Error(fmt.Sprintf(SerialError, "azure tenant", err)) } else { converted.NodeProps = append(converted.NodeProps, ein.ConvertAzureTenantToNode(data)) } @@ -529,7 +530,7 @@ func convertAzureTenant(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureUser(raw json.RawMessage, converted *ConvertedAzureData) { var data models.User if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure user", err)) + slog.Error(fmt.Sprintf(SerialError, "azure user", err)) } else { node, onPremNode, rel := ein.ConvertAzureUser(data) converted.NodeProps = append(converted.NodeProps, node) @@ -543,7 +544,7 @@ func convertAzureUser(raw json.RawMessage, converted *ConvertedAzureData) { func convertAzureVirtualMachine(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachine if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine", err)) } else { node, rels := ein.ConvertAzureVirtualMachine(data) 
converted.NodeProps = append(converted.NodeProps, node) @@ -554,7 +555,7 @@ func convertAzureVirtualMachine(raw json.RawMessage, converted *ConvertedAzureDa func convertAzureVirtualMachineAdminLogin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineAdminLogins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine admin login", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine admin login", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineAdminLoginToRels(data)...) } @@ -563,7 +564,7 @@ func convertAzureVirtualMachineAdminLogin(raw json.RawMessage, converted *Conver func convertAzureVirtualMachineAvereContributor(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineAvereContributors if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine avere contributor", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine avere contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineAvereContributorToRels(data)...) } @@ -572,7 +573,7 @@ func convertAzureVirtualMachineAvereContributor(raw json.RawMessage, converted * func convertAzureVirtualMachineContributor(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineContributors if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine contributor", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineContributorToRels(data)...) 
} @@ -581,7 +582,7 @@ func convertAzureVirtualMachineContributor(raw json.RawMessage, converted *Conve func convertAzureVirtualMachineVMContributor(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineVMContributors if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine contributor", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine vm contributor", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineVMContributorToRels(data)...) } @@ -590,7 +591,7 @@ func convertAzureVirtualMachineVMContributor(raw json.RawMessage, converted *Con func convertAzureVirtualMachineOwner(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineOwners if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine owner", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineOwnerToRels(data)...) } @@ -599,7 +600,7 @@ func convertAzureVirtualMachineOwner(raw json.RawMessage, converted *ConvertedAz func convertAzureVirtualMachineUserAccessAdmin(raw json.RawMessage, converted *ConvertedAzureData) { var data models.VirtualMachineUserAccessAdmins if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure virtual machine user access admin", err)) + slog.Error(fmt.Sprintf(SerialError, "azure virtual machine user access admin", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureVirtualMachineUserAccessAdminToRels(data)...)
} @@ -608,7 +609,7 @@ func convertAzureVirtualMachineUserAccessAdmin(raw json.RawMessage, converted *C func convertAzureManagedCluster(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ManagedCluster if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure managed cluster", err)) + slog.Error(fmt.Sprintf(SerialError, "azure managed cluster", err)) } else { NodeResourceGroupID := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s", data.SubscriptionId, data.Properties.NodeResourceGroup) @@ -622,7 +623,7 @@ func convertAzureManagedClusterRoleAssignment(raw json.RawMessage, converted *Co var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure managed cluster role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure managed cluster role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureManagedClusterRoleAssignmentToRels(data)...) 
} @@ -631,7 +632,7 @@ func convertAzureManagedClusterRoleAssignment(raw json.RawMessage, converted *Co func convertAzureContainerRegistry(raw json.RawMessage, converted *ConvertedAzureData) { var data models.ContainerRegistry if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure container registry", err)) + slog.Error(fmt.Sprintf(SerialError, "azure container registry", err)) } else { node, rels := ein.ConvertAzureContainerRegistry(data) converted.NodeProps = append(converted.NodeProps, node) @@ -642,7 +643,7 @@ func convertAzureContainerRegistry(raw json.RawMessage, converted *ConvertedAzur func convertAzureWebApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.WebApp if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure web app", err)) + slog.Error(fmt.Sprintf(SerialError, "azure web app", err)) } else { node, relationships := ein.ConvertAzureWebApp(data) converted.NodeProps = append(converted.NodeProps, node) @@ -654,7 +655,7 @@ func convertAzureContainerRegistryRoleAssignment(raw json.RawMessage, converted var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure container registry role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure container registry role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureContainerRegistryRoleAssignment(data)...) } @@ -664,7 +665,7 @@ func convertAzureWebAppRoleAssignment(raw json.RawMessage, converted *ConvertedA var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure web app role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure web app role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureWebAppRoleAssignment(data)...) 
} @@ -673,7 +674,7 @@ func convertAzureWebAppRoleAssignment(raw json.RawMessage, converted *ConvertedA func convertAzureLogicApp(raw json.RawMessage, converted *ConvertedAzureData) { var data models.LogicApp if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure logic app", err)) + slog.Error(fmt.Sprintf(SerialError, "azure logic app", err)) } else { node, relationships := ein.ConvertAzureLogicApp(data) converted.NodeProps = append(converted.NodeProps, node) @@ -685,7 +686,7 @@ func convertAzureLogicAppRoleAssignment(raw json.RawMessage, converted *Converte var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure logic app role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure logic app role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureLogicAppRoleAssignment(data)...) } @@ -694,7 +695,7 @@ func convertAzureLogicAppRoleAssignment(raw json.RawMessage, converted *Converte func convertAzureAutomationAccount(raw json.RawMessage, converted *ConvertedAzureData) { var data models.AutomationAccount if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure automation account", err)) + slog.Error(fmt.Sprintf(SerialError, "azure automation account", err)) } else { node, relationships := ein.ConvertAzureAutomationAccount(data) converted.NodeProps = append(converted.NodeProps, node) @@ -706,7 +707,7 @@ func convertAzureAutomationAccountRoleAssignment(raw json.RawMessage, converted var data models.AzureRoleAssignments if err := json.Unmarshal(raw, &data); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure automation account role assignments", err)) + slog.Error(fmt.Sprintf(SerialError, "azure automation account role assignments", err)) } else { converted.RelProps = append(converted.RelProps, ein.ConvertAzureAutomationAccountRoleAssignment(data)...) 
} diff --git a/cmd/api/src/daemons/datapipe/cleanup.go b/cmd/api/src/daemons/datapipe/cleanup.go index aabcbeb426..0ea3c3878f 100644 --- a/cmd/api/src/daemons/datapipe/cleanup.go +++ b/cmd/api/src/daemons/datapipe/cleanup.go @@ -85,7 +85,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin log.Debugf(fmt.Sprintf("OrphanFileSweeper expected names %v", expectedFileNames)) if dirEntries, err := s.fileOps.ReadDir(s.tempDirectoryRootPath); err != nil { - log.Errorf(fmt.Sprintf("Failed reading work directory %s: %v", s.tempDirectoryRootPath, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed reading work directory %s: %v", s.tempDirectoryRootPath, err)) } else { numDeleted := 0 @@ -117,7 +117,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin fullPath := filepath.Join(s.tempDirectoryRootPath, orphanedDirEntry.Name()) if err := s.fileOps.RemoveAll(fullPath); err != nil { - log.Errorf(fmt.Sprintf("Failed removing orphaned file %s: %v", fullPath, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed removing orphaned file %s: %v", fullPath, err)) } numDeleted += 1 diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 9ad4697ea3..3fc7e159a0 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -25,7 +25,6 @@ import ( "github.com/specterops/bloodhound/cache" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" @@ -69,7 +68,7 @@ func (s *Daemon) analyze() { // Ensure that the user-requested analysis switch is deleted. This is done at the beginning of the // function so that any re-analysis requests are caught while analysis is in-progress. 
if err := s.db.DeleteAnalysisRequest(s.ctx); err != nil { - log.Errorf(fmt.Sprintf("Error deleting analysis request: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error deleting analysis request: %v", err)) return } @@ -78,7 +77,7 @@ func (s *Daemon) analyze() { } if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusAnalyzing, false); err != nil { - log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error setting datapipe status: %v", err)) return } @@ -88,14 +87,14 @@ func (s *Daemon) analyze() { if errors.Is(err, ErrAnalysisFailed) { FailAnalyzedFileUploadJobs(s.ctx, s.db) if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, false); err != nil { - log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error setting datapipe status: %v", err)) return } } else if errors.Is(err, ErrAnalysisPartiallyCompleted) { PartialCompleteFileUploadJobs(s.ctx, s.db) if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, true); err != nil { - log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error setting datapipe status: %v", err)) return } } @@ -103,13 +102,13 @@ func (s *Daemon) analyze() { CompleteAnalyzedFileUploadJobs(s.ctx, s.db) if entityPanelCachingFlag, err := s.db.GetFlagByKey(s.ctx, appcfg.FeatureEntityPanelCaching); err != nil { - log.Errorf(fmt.Sprintf("Error retrieving entity panel caching flag: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error retrieving entity panel caching flag: %v", err)) } else { resetCache(s.cache, entityPanelCachingFlag.Enabled) } if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, true); err != nil { - log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error setting datapipe status: %v", err)) return } } @@ -117,7 +116,7 @@ func (s *Daemon) analyze() { func 
resetCache(cacher cache.Cache, _ bool) { if err := cacher.Reset(); err != nil { - log.Errorf(fmt.Sprintf("Error while resetting the cache: %v", err)) + slog.Error(fmt.Sprintf("Error while resetting the cache: %v", err)) } else { slog.Info("Cache successfully reset by datapipe daemon") } @@ -125,7 +124,7 @@ func resetCache(cacher cache.Cache, _ bool) { func (s *Daemon) ingestAvailableTasks() { if ingestTasks, err := s.db.GetAllIngestTasks(s.ctx); err != nil { - log.Errorf(fmt.Sprintf("Failed fetching available ingest tasks: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Failed fetching available ingest tasks: %v", err)) } else { s.processIngestTasks(s.ctx, ingestTasks) } @@ -163,7 +162,7 @@ func (s *Daemon) Start(ctx context.Context) { // If there are completed file upload jobs or if analysis was user-requested, perform analysis. if hasJobsWaitingForAnalysis, err := HasFileUploadJobsWaitingForAnalysis(s.ctx, s.db); err != nil { - log.Errorf(fmt.Sprintf("Failed looking up jobs waiting for analysis: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed looking up jobs waiting for analysis: %v", err)) } else if hasJobsWaitingForAnalysis || s.db.HasAnalysisRequest(s.ctx) { s.analyze() } @@ -185,18 +184,18 @@ func (s *Daemon) deleteData() { defer measure.Measure(slog.LevelInfo, "Purge Graph Data Completed")() if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusPurging, false); err != nil { - log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error setting datapipe status: %v", err)) return } slog.Info("Begin Purge Graph Data") if err := s.db.CancelAllFileUploads(s.ctx); err != nil { - log.Errorf(fmt.Sprintf("Error cancelling jobs during data deletion: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error cancelling jobs during data deletion: %v", err)) } else if err := s.db.DeleteAllIngestTasks(s.ctx); err != nil { - log.Errorf(fmt.Sprintf("Error deleting ingest tasks during data deletion: %v", err)) 
+ slog.ErrorContext(s.ctx, fmt.Sprintf("Error deleting ingest tasks during data deletion: %v", err)) } else if err := DeleteCollectedGraphData(s.ctx, s.graphdb); err != nil { - log.Errorf(fmt.Sprintf("Error deleting graph data: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error deleting graph data: %v", err)) } } @@ -206,7 +205,7 @@ func (s *Daemon) Stop(ctx context.Context) error { func (s *Daemon) clearOrphanedData() { if ingestTasks, err := s.db.GetAllIngestTasks(s.ctx); err != nil { - log.Errorf(fmt.Sprintf("Failed fetching available file upload ingest tasks: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Failed fetching available file upload ingest tasks: %v", err)) } else { expectedFiles := make([]string, len(ingestTasks)) diff --git a/cmd/api/src/daemons/datapipe/decoders.go b/cmd/api/src/daemons/datapipe/decoders.go index b683947d0d..592ef082cf 100644 --- a/cmd/api/src/daemons/datapipe/decoders.go +++ b/cmd/api/src/daemons/datapipe/decoders.go @@ -20,11 +20,11 @@ import ( "errors" "fmt" "io" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/util" "github.com/specterops/bloodhound/ein" - "github.com/specterops/bloodhound/log" ) /* @@ -50,7 +50,7 @@ func decodeBasicData[T any](batch graph.Batch, reader io.ReadSeeker, conversionF // This variable needs to be initialized here, otherwise the marshaller will cache the map in the struct var decodeTarget T if err := decoder.Decode(&decodeTarget); err != nil { - log.Errorf(fmt.Sprintf("Error decoding %T object: %v", decodeTarget, err)) + slog.Error(fmt.Sprintf("Error decoding %T object: %v", decodeTarget, err)) if errors.Is(err, io.EOF) { break } @@ -94,7 +94,7 @@ func decodeGroupData(batch graph.Batch, reader io.ReadSeeker) error { for decoder.More() { var group ein.Group if err = decoder.Decode(&group); err != nil { - log.Errorf(fmt.Sprintf("Error decoding group object: %v", err)) + slog.Error(fmt.Sprintf("Error decoding group object: %v", err)) if 
errors.Is(err, io.EOF) { break } @@ -136,7 +136,7 @@ func decodeSessionData(batch graph.Batch, reader io.ReadSeeker) error { for decoder.More() { var session ein.Session if err = decoder.Decode(&session); err != nil { - log.Errorf(fmt.Sprintf("Error decoding session object: %v", err)) + slog.Error(fmt.Sprintf("Error decoding session object: %v", err)) if errors.Is(err, io.EOF) { break } @@ -178,7 +178,7 @@ func decodeAzureData(batch graph.Batch, reader io.ReadSeeker) error { for decoder.More() { var data AzureBase if err = decoder.Decode(&data); err != nil { - log.Errorf(fmt.Sprintf("Error decoding azure object: %v", err)) + slog.Error(fmt.Sprintf("Error decoding azure object: %v", err)) if errors.Is(err, io.EOF) { break } diff --git a/cmd/api/src/daemons/datapipe/ingest.go b/cmd/api/src/daemons/datapipe/ingest.go index 860f4fab42..da122c2b7e 100644 --- a/cmd/api/src/daemons/datapipe/ingest.go +++ b/cmd/api/src/daemons/datapipe/ingest.go @@ -19,6 +19,7 @@ package datapipe import ( "fmt" "io" + "log/slog" "strings" "time" @@ -28,7 +29,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/model/ingest" "github.com/specterops/bloodhound/src/services/fileupload" ) @@ -145,7 +145,7 @@ func NormalizeEinNodeProperties(properties map[string]any, objectID string, nowU if name, typeMatches := rawName.(string); typeMatches { properties[common.Name.String()] = strings.ToUpper(name) } else { - log.Errorf(fmt.Sprintf("Bad type found for node name property during ingest. Expected string, got %T", rawName)) + slog.Error(fmt.Sprintf("Bad type found for node name property during ingest. 
Expected string, got %T", rawName)) } } @@ -153,7 +153,7 @@ func NormalizeEinNodeProperties(properties map[string]any, objectID string, nowU if os, typeMatches := rawOS.(string); typeMatches { properties[common.OperatingSystem.String()] = strings.ToUpper(os) } else { - log.Errorf(fmt.Sprintf("Bad type found for node operating system property during ingest. Expected string, got %T", rawOS)) + slog.Error(fmt.Sprintf("Bad type found for node operating system property during ingest. Expected string, got %T", rawOS)) } } @@ -161,7 +161,7 @@ func NormalizeEinNodeProperties(properties map[string]any, objectID string, nowU if dn, typeMatches := rawDN.(string); typeMatches { properties[ad.DistinguishedName.String()] = strings.ToUpper(dn) } else { - log.Errorf(fmt.Sprintf("Bad type found for node distinguished name property during ingest. Expected string, got %T", rawDN)) + slog.Error(fmt.Sprintf("Bad type found for node distinguished name property during ingest. Expected string, got %T", rawDN)) } } @@ -188,7 +188,7 @@ func IngestNodes(batch graph.Batch, identityKind graph.Kind, nodes []ein.Ingesti for _, next := range nodes { if err := IngestNode(batch, nowUTC, identityKind, next); err != nil { - log.Errorf(fmt.Sprintf("Error ingesting node ID %s: %v", next.ObjectID, err)) + slog.Error(fmt.Sprintf("Error ingesting node ID %s: %v", next.ObjectID, err)) errs.Add(err) } } @@ -231,7 +231,7 @@ func IngestRelationships(batch graph.Batch, nodeIDKind graph.Kind, relationships for _, next := range relationships { if err := IngestRelationship(batch, nowUTC, nodeIDKind, next); err != nil { - log.Errorf(fmt.Sprintf("Error ingesting relationship from %s to %s : %v", next.Source, next.Target, err)) + slog.Error(fmt.Sprintf("Error ingesting relationship from %s to %s : %v", next.Source, next.Target, err)) errs.Add(err) } } @@ -274,7 +274,7 @@ func IngestDNRelationships(batch graph.Batch, relationships []ein.IngestibleRela for _, next := range relationships { if err := 
ingestDNRelationship(batch, nowUTC, next); err != nil { - log.Errorf(fmt.Sprintf("Error ingesting relationship: %v", err)) + slog.Error(fmt.Sprintf("Error ingesting relationship: %v", err)) errs.Add(err) } } @@ -319,7 +319,7 @@ func IngestSessions(batch graph.Batch, sessions []ein.IngestibleSession) error { for _, next := range sessions { if err := ingestSession(batch, nowUTC, next); err != nil { - log.Errorf(fmt.Sprintf("Error ingesting sessions: %v", err)) + slog.Error(fmt.Sprintf("Error ingesting sessions: %v", err)) errs.Add(err) } } diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index 097da91d77..696ab1334f 100644 --- a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "os" "github.com/specterops/bloodhound/bomenc" @@ -51,11 +52,11 @@ func FailAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusAnalyzing); err != nil { - log.Errorf(fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) } else { for _, job := range fileUploadJobsUnderAnalysis { if err := fileupload.UpdateFileUploadJobStatus(ctx, db, job, model.JobStatusFailed, "Analysis failed"); err != nil { - log.Errorf(fmt.Sprintf("Failed updating file upload job %d to failed status: %v", job.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed updating file upload job %d to failed status: %v", job.ID, err)) } } } @@ -69,11 +70,11 @@ func PartialCompleteFileUploadJobs(ctx context.Context, db database.Database) { } if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusAnalyzing); err != nil { - log.Errorf(fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to 
load file upload jobs under analysis: %v", err)) } else { for _, job := range fileUploadJobsUnderAnalysis { if err := fileupload.UpdateFileUploadJobStatus(ctx, db, job, model.JobStatusPartiallyComplete, "Partially Completed"); err != nil { - log.Errorf(fmt.Sprintf("Failed updating file upload job %d to partially completed status: %v", job.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed updating file upload job %d to partially completed status: %v", job.ID, err)) } } } @@ -87,7 +88,7 @@ func CompleteAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusAnalyzing); err != nil { - log.Errorf(fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to load file upload jobs under analysis: %v", err)) } else { for _, job := range fileUploadJobsUnderAnalysis { var ( @@ -106,7 +107,7 @@ func CompleteAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } if err := fileupload.UpdateFileUploadJobStatus(ctx, db, job, status, message); err != nil { - log.Errorf(fmt.Sprintf("Error updating file upload job %d: %v", job.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error updating file upload job %d: %v", job.ID, err)) } } } @@ -120,14 +121,14 @@ func ProcessIngestedFileUploadJobs(ctx context.Context, db database.Database) { } if ingestingFileUploadJobs, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusIngesting); err != nil { - log.Errorf(fmt.Sprintf("Failed to look up finished file upload jobs: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to look up finished file upload jobs: %v", err)) } else { for _, ingestingFileUploadJob := range ingestingFileUploadJobs { if remainingIngestTasks, err := db.GetIngestTasksForJob(ctx, ingestingFileUploadJob.ID); err != nil { - log.Errorf(fmt.Sprintf("Failed looking up remaining ingest tasks for file upload job %d: %v", 
ingestingFileUploadJob.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed looking up remaining ingest tasks for file upload job %d: %v", ingestingFileUploadJob.ID, err)) } else if len(remainingIngestTasks) == 0 { if err := fileupload.UpdateFileUploadJobStatus(ctx, db, ingestingFileUploadJob, model.JobStatusAnalyzing, "Analyzing"); err != nil { - log.Errorf(fmt.Sprintf("Error updating fileupload job %d: %v", ingestingFileUploadJob.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error updating fileupload job %d: %v", ingestingFileUploadJob.ID, err)) } } } @@ -137,7 +138,7 @@ func ProcessIngestedFileUploadJobs(ctx context.Context, db database.Database) { // clearFileTask removes a generic file upload task for ingested data. func (s *Daemon) clearFileTask(ingestTask model.IngestTask) { if err := s.db.DeleteIngestTask(s.ctx, ingestTask); err != nil { - log.Errorf(fmt.Sprintf("Error removing file upload task from db: %v", err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error removing file upload task from db: %v", err)) } } @@ -184,9 +185,9 @@ func (s *Daemon) preProcessIngestFile(path string, fileType model.FileType) ([]s //Close the archive and delete it if err := archive.Close(); err != nil { - log.Errorf(fmt.Sprintf("Error closing archive %s: %v", path, err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error closing archive %s: %v", path, err)) } else if err := os.Remove(path); err != nil { - log.Errorf(fmt.Sprintf("Error deleting archive %s: %v", path, err)) + slog.ErrorContext(s.ctx, fmt.Sprintf("Error deleting archive %s: %v", path, err)) } return filePaths, failed, errs.Combined() @@ -198,7 +199,7 @@ func (s *Daemon) preProcessIngestFile(path string, fileType model.FileType) ([]s func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType model.FileType) (int, int, error) { adcsEnabled := false if adcsFlag, err := s.db.GetFlagByKey(ctx, appcfg.FeatureAdcs); err != nil { - log.Errorf(fmt.Sprintf("Error getting ADCS flag: %v", err)) + 
slog.ErrorContext(ctx, fmt.Sprintf("Error getting ADCS flag: %v", err)) } else { adcsEnabled = adcsFlag.Enabled } @@ -215,15 +216,15 @@ func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType mo return err } else if err := ReadFileForIngest(batch, file, adcsEnabled); err != nil { failed++ - log.Errorf(fmt.Sprintf("Error reading ingest file %s: %v", filePath, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error reading ingest file %s: %v", filePath, err)) } if err := file.Close(); err != nil { - log.Errorf(fmt.Sprintf("Error closing ingest file %s: %v", filePath, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error closing ingest file %s: %v", filePath, err)) } else if err := os.Remove(filePath); errors.Is(err, fs.ErrNotExist) { log.Warnf(fmt.Sprintf("Removing ingest file %s: %v", filePath, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error removing ingest file %s: %v", filePath, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error removing ingest file %s: %v", filePath, err)) } } @@ -235,7 +236,7 @@ func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType mo // processIngestTasks covers the generic file upload case for ingested data. 
func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.IngestTasks) { if err := s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIngesting, false); err != nil { - log.Errorf(fmt.Sprintf("Error setting datapipe status: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error setting datapipe status: %v", err)) return } defer s.db.SetDatapipeStatus(s.ctx, model.DatapipeStatusIdle, false) @@ -256,14 +257,14 @@ func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.Inges if errors.Is(err, fs.ErrNotExist) { log.Warnf(fmt.Sprintf("Did not process ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed processing ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed processing ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) } else if job, err := s.db.GetFileUploadJob(ctx, ingestTask.TaskID.ValueOrZero()); err != nil { - log.Errorf(fmt.Sprintf("Failed to fetch job for ingest task %d: %v", ingestTask.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to fetch job for ingest task %d: %v", ingestTask.ID, err)) } else { job.TotalFiles = total job.FailedFiles += failed if err = s.db.UpdateFileUploadJob(ctx, job); err != nil { - log.Errorf(fmt.Sprintf("Failed to update number of failed files for file upload job ID %s: %v", job.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to update number of failed files for file upload job ID %s: %v", job.ID, err)) } } diff --git a/cmd/api/src/database/analysisrequest.go b/cmd/api/src/database/analysisrequest.go index 95a2dcd45d..fd66edad0b 100644 --- a/cmd/api/src/database/analysisrequest.go +++ b/cmd/api/src/database/analysisrequest.go @@ -23,7 +23,6 @@ import ( "log/slog" "time" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/model" ) @@ -54,7 +53,7 @@ func (s *BloodhoundDB) 
HasAnalysisRequest(ctx context.Context) bool { tx := s.db.WithContext(ctx).Raw(`select exists(select * from analysis_request_switch where request_type = ? limit 1);`, model.AnalysisRequestAnalysis).Scan(&exists) if tx.Error != nil { - log.Errorf(fmt.Sprintf("Error determining if there's an analysis request: %v", tx.Error)) + slog.ErrorContext(ctx, fmt.Sprintf("Error determining if there's an analysis request: %v", tx.Error)) } return exists } @@ -64,7 +63,7 @@ func (s *BloodhoundDB) HasCollectedGraphDataDeletionRequest(ctx context.Context) tx := s.db.WithContext(ctx).Raw(`select exists(select * from analysis_request_switch where request_type = ? limit 1);`, model.AnalysisRequestDeletion).Scan(&exists) if tx.Error != nil { - log.Errorf(fmt.Sprintf("Error determining if there's a deletion request: %v", tx.Error)) + slog.ErrorContext(ctx, fmt.Sprintf("Error determining if there's a deletion request: %v", tx.Error)) } return exists } diff --git a/cmd/api/src/database/db.go b/cmd/api/src/database/db.go index 0253ce48fe..bb815d3ee8 100644 --- a/cmd/api/src/database/db.go +++ b/cmd/api/src/database/db.go @@ -22,10 +22,10 @@ import ( "context" "errors" "fmt" + "log/slog" "time" "github.com/gofrs/uuid" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/database/migration" "github.com/specterops/bloodhound/src/model" @@ -170,9 +170,9 @@ type BloodhoundDB struct { func (s *BloodhoundDB) Close(ctx context.Context) { if sqlDBRef, err := s.db.WithContext(ctx).DB(); err != nil { - log.Errorf(fmt.Sprintf("Failed to fetch SQL DB reference from GORM: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed to fetch SQL DB reference from GORM: %v", err)) } else if err := sqlDBRef.Close(); err != nil { - log.Errorf(fmt.Sprintf("Failed closing database: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed closing database: %v", err)) } } @@ -240,7 +240,7 @@ func (s *BloodhoundDB) Wipe(ctx context.Context) error { 
func (s *BloodhoundDB) Migrate(ctx context.Context) error { // Run the migrator if err := migration.NewMigrator(s.db.WithContext(ctx)).ExecuteStepwiseMigrations(); err != nil { - log.Errorf(fmt.Sprintf("Error during SQL database migration phase: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error during SQL database migration phase: %v", err)) return err } diff --git a/cmd/api/src/migrations/manifest.go b/cmd/api/src/migrations/manifest.go index 641ed4ee2a..ddaa4f0e2b 100644 --- a/cmd/api/src/migrations/manifest.go +++ b/cmd/api/src/migrations/manifest.go @@ -31,7 +31,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/version" ) @@ -194,7 +193,7 @@ func Version_277_Migration(db graph.Database) error { var dirty = false if objectId, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Error getting objectid for node %d: %v", node.ID, err)) + slog.Error(fmt.Sprintf("Error getting objectid for node %d: %v", node.ID, err)) continue } else if objectId != strings.ToUpper(objectId) { dirty = true @@ -225,7 +224,7 @@ func Version_277_Migration(db graph.Database) error { } else if node.Kinds.ContainsOneOf(azure.Entity) { identityKind = azure.Entity } else { - log.Errorf(fmt.Sprintf("Unable to figure out base kind of node %d", node.ID)) + slog.Error(fmt.Sprintf("Unable to figure out base kind of node %d", node.ID)) } if identityKind != nil { @@ -234,7 +233,7 @@ func Version_277_Migration(db graph.Database) error { IdentityKind: identityKind, IdentityProperties: []string{common.ObjectID.String()}, }); err != nil { - log.Errorf(fmt.Sprintf("Error updating node %d: %v", node.ID, err)) + slog.Error(fmt.Sprintf("Error updating node %d: %v", node.ID, err)) } } } diff --git 
a/cmd/api/src/model/audit.go b/cmd/api/src/model/audit.go index 22e9c6efe4..ca39b018b0 100644 --- a/cmd/api/src/model/audit.go +++ b/cmd/api/src/model/audit.go @@ -18,11 +18,11 @@ package model import ( "fmt" + "log/slog" "reflect" "time" "github.com/gofrs/uuid" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database/types" ) @@ -228,7 +228,7 @@ func (s AuditEntry) String() string { func NewAuditEntry(action AuditLogAction, status AuditLogEntryStatus, data AuditData) (AuditEntry, error) { if commitId, err := uuid.NewV4(); err != nil { - log.Errorf(fmt.Sprintf("Error generating commit ID for audit entry: %s", err.Error())) + slog.Error(fmt.Sprintf("Error generating commit ID for audit entry: %s", err.Error())) return AuditEntry{}, err } else { return AuditEntry{Action: action, Model: data, Status: status, CommitID: commitId}, nil diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index c827ce95e2..3db84ca3b1 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -638,7 +638,7 @@ func (s *GraphQuery) GetEntityCountResults(ctx context.Context, node *graph.Node if result, err := runEntityQuery(ctx, s.Graph, delegate, node, 0, 0); errors.Is(err, graph.ErrContextTimedOut) { log.Warnf(fmt.Sprintf("Running entity query for key %s: %v", delegateKey, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error running entity query for key %s: %v", delegateKey, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error running entity query for key %s: %v", delegateKey, err)) data.Store(delegateKey, 0) } else { data.Store(delegateKey, result.Len()) @@ -786,7 +786,7 @@ func (s *GraphQuery) cacheQueryResult(queryStart time.Time, cacheKey string, res // Using GuardedSet here even though it isn't necessary because it allows us to collect information on how often // we run these queries in parallel if set, sizeInBytes, err := s.Cache.GuardedSet(cacheKey, result); err != nil { - log.Errorf(fmt.Sprintf("[Entity 
Results Cache] Failed to write results to cache for key: %s", cacheKey)) + slog.Error(fmt.Sprintf("[Entity Results Cache] Failed to write results to cache for key: %s", cacheKey)) } else if !set { log.Warnf(fmt.Sprintf("[Entity Results Cache] Cache entry for query %s not set because it already exists", cacheKey)) } else { @@ -937,14 +937,14 @@ func fromGraphNodes(nodes graph.NodeSet) []model.PagedNodeListEntry { ) if objectId, err := props.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Error getting objectid for %d: %v", node.ID, err)) + slog.Error(fmt.Sprintf("Error getting objectid for %d: %v", node.ID, err)) nodeEntry.ObjectID = "" } else { nodeEntry.ObjectID = objectId } if name, err := props.Get(common.Name.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Error getting name for %d: %v", node.ID, err)) + slog.Error(fmt.Sprintf("Error getting name for %d: %v", node.ID, err)) nodeEntry.Name = "" } else { nodeEntry.Name = name diff --git a/cmd/api/src/services/agi/agi.go b/cmd/api/src/services/agi/agi.go index 4aeca7d60f..d5184c163f 100644 --- a/cmd/api/src/services/agi/agi.go +++ b/cmd/api/src/services/agi/agi.go @@ -33,7 +33,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/model" ) @@ -95,7 +94,7 @@ func RunAssetGroupIsolationCollections(ctx context.Context, db AgiData, graphDB idx := 0 for _, node := range assetGroupNodes { if objectID, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Node %d that does not have valid %s property", node.ID, common.ObjectID)) + slog.ErrorContext(ctx, fmt.Sprintf("Node %d that does not have valid %s property", node.ID, common.ObjectID)) } else { entries[idx] = model.AssetGroupCollectionEntry{ ObjectID: objectID, diff --git 
a/cmd/api/src/services/fileupload/file_upload.go b/cmd/api/src/services/fileupload/file_upload.go index 3b9e3ff648..381f2f1485 100644 --- a/cmd/api/src/services/fileupload/file_upload.go +++ b/cmd/api/src/services/fileupload/file_upload.go @@ -22,18 +22,18 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "os" "time" "github.com/specterops/bloodhound/bomenc" "github.com/specterops/bloodhound/headers" + "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" + "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/model/ingest" "github.com/specterops/bloodhound/src/utils" - - "github.com/specterops/bloodhound/log" - "github.com/specterops/bloodhound/src/model" ) const jobActivityTimeout = time.Minute * 20 @@ -64,7 +64,7 @@ func ProcessStaleFileUploadJobs(ctx context.Context, db FileUploadData) { ) if jobs, err := db.GetFileUploadJobsWithStatus(ctx, model.JobStatusRunning); err != nil { - log.Errorf(fmt.Sprintf("Error getting running jobs: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting running jobs: %v", err)) } else { for _, job := range jobs { if job.LastIngest.Before(threshold) { @@ -74,7 +74,7 @@ func ProcessStaleFileUploadJobs(ctx context.Context, db FileUploadData) { job.LastIngest.Format(time.RFC3339))) if err := TimeOutUploadJob(ctx, db, job.ID, fmt.Sprintf("Ingest timeout: No ingest activity observed in %f minutes. 
Upload incomplete.", now.Sub(threshold).Minutes())); err != nil { - log.Errorf(fmt.Sprintf("Error marking file upload job %d as timed out: %v", job.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error marking file upload job %d as timed out: %v", job.ID, err)) } } } @@ -145,14 +145,14 @@ type FileValidator func(src io.Reader, dst io.Writer) error func WriteAndValidateFile(fileData io.ReadCloser, tempFile *os.File, validationFunc FileValidator) error { if err := validationFunc(fileData, tempFile); err != nil { if err := tempFile.Close(); err != nil { - log.Errorf(fmt.Sprintf("Error closing temp file %s with failed validation: %v", tempFile.Name(), err)) + slog.Error(fmt.Sprintf("Error closing temp file %s with failed validation: %v", tempFile.Name(), err)) } else if err := os.Remove(tempFile.Name()); err != nil { - log.Errorf(fmt.Sprintf("Error deleting temp file %s: %v", tempFile.Name(), err)) + slog.Error(fmt.Sprintf("Error deleting temp file %s: %v", tempFile.Name(), err)) } return err } else { if err := tempFile.Close(); err != nil { - log.Errorf(fmt.Sprintf("Error closing temp file with successful validation %s: %v", tempFile.Name(), err)) + slog.Error(fmt.Sprintf("Error closing temp file with successful validation %s: %v", tempFile.Name(), err)) } return nil } diff --git a/packages/go/analysis/ad/ad.go b/packages/go/analysis/ad/ad.go index 70aa461f1f..56784b57cc 100644 --- a/packages/go/analysis/ad/ad.go +++ b/packages/go/analysis/ad/ad.go @@ -35,7 +35,6 @@ import ( "github.com/specterops/bloodhound/dawgs/util" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) var ( @@ -203,9 +202,9 @@ func grabDomainInformation(tx graph.Transaction) (map[string]string, error) { }).Fetch(func(cursor graph.Cursor[*graph.Node]) error { for node := range cursor.Chan() { if domainObjectID, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - 
log.Errorf(fmt.Sprintf("Domain node %d does not have a valid object ID", node.ID)) + slog.Error(fmt.Sprintf("Domain node %d does not have a valid object ID", node.ID)) } else if domainName, err := node.Properties.Get(common.Name.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Domain node %d does not have a valid name", node.ID)) + slog.Error(fmt.Sprintf("Domain node %d does not have a valid name", node.ID)) } else { domainNamesByObjectID[domainObjectID] = domainName } @@ -234,9 +233,9 @@ func LinkWellKnownGroups(ctx context.Context, db graph.Database) error { for _, domain := range domains { if domainSid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Error getting domain sid for domain %d: %v", domain.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting domain sid for domain %d: %v", domain.ID, err)) } else if domainName, err := domain.Properties.Get(common.Name.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Error getting domain name for domain %d: %v", domain.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting domain name for domain %d: %v", domain.ID, err)) } else { var ( domainId = domain.ID @@ -265,7 +264,7 @@ func LinkWellKnownGroups(ctx context.Context, db graph.Database) error { return nil } }); err != nil { - log.Errorf(fmt.Sprintf("Error linking well known groups for domain %d: %v", domain.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error linking well known groups for domain %d: %v", domain.ID, err)) errors.Add(fmt.Errorf("failed linking well known groups for domain %d: %w", domain.ID, err)) } } @@ -322,7 +321,7 @@ func createOrUpdateWellKnownLink(tx graph.Transaction, startNode *graph.Node, en // See CalculateCrossProductNodeSetsDoc.md for explaination of the specialGroups (Authenticated Users and Everyone) and why we treat them the way we do func CalculateCrossProductNodeSets(tx graph.Transaction, domainsid string, groupExpansions impact.PathAggregator, 
nodeSlices ...[]*graph.Node) cardinality.Duplex[uint64] { if len(nodeSlices) < 2 { - log.Errorf(fmt.Sprintf("Cross products require at least 2 nodesets")) + slog.Error(fmt.Sprintf("Cross products require at least 2 nodesets")) return cardinality.NewBitmap64() } @@ -346,7 +345,7 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, domainsid string, group specialGroups, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid) if err != nil { - log.Errorf(fmt.Sprintf("Could not fetch groups: %s", err.Error())) + slog.Error(fmt.Sprintf("Could not fetch groups: %s", err.Error())) } //Unroll all nodesets diff --git a/packages/go/analysis/ad/adcs.go b/packages/go/analysis/ad/adcs.go index d415a656b1..63f029ecc5 100644 --- a/packages/go/analysis/ad/adcs.go +++ b/packages/go/analysis/ad/adcs.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/analysis/impact" @@ -117,7 +118,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostGoldenCert(ctx, tx, outC, domain, enterpriseCA); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.GoldenCert.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.GoldenCert.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.GoldenCert.String(), err)) } return nil }) @@ -126,7 +127,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC1(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC1.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC1.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", 
ad.ADCSESC1.String(), err)) } return nil }) @@ -135,7 +136,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC3(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC3.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC3.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC3.String(), err)) } return nil }) @@ -144,7 +145,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC4(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC4.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC4.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC4.String(), err)) } return nil }) @@ -153,7 +154,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC6a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6a.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6a.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6a.String(), err)) } return nil }) @@ -162,7 +163,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC6b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6b.String(), err)) } else if err != 
nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6b.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6b.String(), err)) } return nil }) @@ -171,7 +172,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC9a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9a.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9a.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9a.String(), err)) } return nil }) @@ -180,7 +181,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC9b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9b.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9b.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9b.String(), err)) } return nil }) @@ -189,7 +190,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC10a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10a.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10a.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10a.String(), err)) } return nil }) @@ -198,7 +199,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC10b(ctx, tx, outC, 
groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10b.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10b.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10b.String(), err)) } return nil }) @@ -207,7 +208,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N if err := PostADCSESC13(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC13.String(), err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC13.String(), err)) + slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC13.String(), err)) } return nil }) diff --git a/packages/go/analysis/ad/adcscache.go b/packages/go/analysis/ad/adcscache.go index f53f3b261a..6b9efb6e03 100644 --- a/packages/go/analysis/ad/adcscache.go +++ b/packages/go/analysis/ad/adcscache.go @@ -71,7 +71,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris for _, ct := range certTemplates { // cert template enrollers if firstDegreePrincipals, err := fetchFirstDegreeNodes(tx, ct, ad.Enroll, ad.GenericAll, ad.AllExtendedRights); err != nil { - log.Errorf(fmt.Sprintf("Error fetching enrollers for cert template %d: %v", ct.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching enrollers for cert template %d: %v", ct.ID, err)) } else { s.certTemplateEnrollers[ct.ID] = firstDegreePrincipals.Slice() @@ -79,7 +79,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris if domainsid, err := ct.Properties.Get(ad.DomainSID.String()).String(); err != nil { log.Warnf(fmt.Sprintf("Error getting domain SID for certtemplate %d: %v", ct.ID, err)) } else if 
authUsersOrEveryoneHasEnroll, err := containsAuthUsersOrEveryone(tx, firstDegreePrincipals.Slice(), domainsid); err != nil { - log.Errorf(fmt.Sprintf("Error fetching if auth. users or everyone has enroll on certtemplate %d: %v", ct.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching if auth. users or everyone has enroll on certtemplate %d: %v", ct.ID, err)) } else { s.certTemplateHasSpecialEnrollers[ct.ID] = authUsersOrEveryoneHasEnroll } @@ -87,7 +87,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris // cert template controllers if firstDegreePrincipals, err := fetchFirstDegreeNodes(tx, ct, ad.Owns, ad.GenericAll, ad.WriteDACL, ad.WriteOwner); err != nil { - log.Errorf(fmt.Sprintf("Error fetching controllers for cert template %d: %v", ct.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching controllers for cert template %d: %v", ct.ID, err)) } else { s.certTemplateControllers[ct.ID] = firstDegreePrincipals.Slice() } @@ -96,7 +96,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris for _, eca := range enterpriseCAs { if firstDegreeEnrollers, err := fetchFirstDegreeNodes(tx, eca, ad.Enroll); err != nil { - log.Errorf(fmt.Sprintf("Error fetching enrollers for enterprise ca %d: %v", eca.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching enrollers for enterprise ca %d: %v", eca.ID, err)) } else { s.enterpriseCAEnrollers[eca.ID] = firstDegreeEnrollers.Slice() @@ -104,14 +104,14 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris if domainsid, err := eca.Properties.Get(ad.DomainSID.String()).String(); err != nil { log.Warnf(fmt.Sprintf("Error getting domain SID for eca %d: %v", eca.ID, err)) } else if authUsersOrEveryoneHasEnroll, err := containsAuthUsersOrEveryone(tx, firstDegreeEnrollers.Slice(), domainsid); err != nil { - log.Errorf(fmt.Sprintf("Error fetching if auth. 
users or everyone has enroll on enterprise ca %d: %v", eca.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching if auth. users or everyone has enroll on enterprise ca %d: %v", eca.ID, err)) } else { s.enterpriseCAHasSpecialEnrollers[eca.ID] = authUsersOrEveryoneHasEnroll } } if publishedTemplates, err := FetchCertTemplatesPublishedToCA(tx, eca); err != nil { - log.Errorf(fmt.Sprintf("Error fetching published cert templates for enterprise ca %d: %v", eca.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching published cert templates for enterprise ca %d: %v", eca.ID, err)) } else { s.publishedTemplateCache[eca.ID] = publishedTemplates.Slice() } @@ -119,9 +119,9 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris for _, domain := range domains { if rootCaForNodes, err := FetchEnterpriseCAsRootCAForPathToDomain(tx, domain); err != nil { - log.Errorf(fmt.Sprintf("Error getting cas via rootcafor for domain %d: %v", domain.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting cas via rootcafor for domain %d: %v", domain.ID, err)) } else if authStoreForNodes, err := FetchEnterpriseCAsTrustedForNTAuthToDomain(tx, domain); err != nil { - log.Errorf(fmt.Sprintf("Error getting cas via authstorefor for domain %d: %v", domain.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting cas via authstorefor for domain %d: %v", domain.ID, err)) } else { s.authStoreForChainValid[domain.ID] = graph.NodeSetToDuplex(authStoreForNodes) s.rootCAForChainValid[domain.ID] = graph.NodeSetToDuplex(rootCaForNodes) @@ -145,7 +145,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris return nil }) if err != nil { - log.Errorf(fmt.Sprintf("Error building adcs cache %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error building adcs cache %v", err)) } slog.InfoContext(ctx, "Finished building adcs cache") diff --git a/packages/go/analysis/ad/esc1.go b/packages/go/analysis/ad/esc1.go index 
82023fc0c3..605a43d6e9 100644 --- a/packages/go/analysis/ad/esc1.go +++ b/packages/go/analysis/ad/esc1.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/analysis" @@ -291,16 +292,16 @@ func getGoldenCertEdgeComposition(tx graph.Transaction, edge *graph.Relationship query.KindIn(query.End(), ad.EnterpriseCA), query.KindIn(query.Relationship(), ad.HostsCAService), ))); err != nil { - log.Errorf(fmt.Sprintf("Error getting hostscaservice edge to enterprise ca for computer %d : %v", startNode.ID, err)) + slog.Error(fmt.Sprintf("Error getting hostscaservice edge to enterprise ca for computer %d : %v", startNode.ID, err)) } else { for _, ecaPath := range ecaPaths { eca := ecaPath.Terminal() if chainToRootCAPaths, err := FetchEnterpriseCAsCertChainPathToDomain(tx, eca, targetDomainNode); err != nil { - log.Errorf(fmt.Sprintf("Error getting eca %d path to domain %d: %v", eca.ID, targetDomainNode.ID, err)) + slog.Error(fmt.Sprintf("Error getting eca %d path to domain %d: %v", eca.ID, targetDomainNode.ID, err)) } else if chainToRootCAPaths.Len() == 0 { continue } else if trustedForAuthPaths, err := FetchEnterpriseCAsTrustedForAuthPathToDomain(tx, eca, targetDomainNode); err != nil { - log.Errorf(fmt.Sprintf("Error getting eca %d path to domain %d via trusted for auth: %v", eca.ID, targetDomainNode.ID, err)) + slog.Error(fmt.Sprintf("Error getting eca %d path to domain %d via trusted for auth: %v", eca.ID, targetDomainNode.ID, err)) } else if trustedForAuthPaths.Len() == 0 { continue } else { diff --git a/packages/go/analysis/ad/esc13.go b/packages/go/analysis/ad/esc13.go index d358533ffc..9e2771823c 100644 --- a/packages/go/analysis/ad/esc13.go +++ b/packages/go/analysis/ad/esc13.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/analysis" @@ -46,11 +47,11 @@ func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analys if isValid, err := 
isCertTemplateValidForESC13(template); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Checking esc13 cert template PostADCSESC13: %v", err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error checking esc13 cert template PostADCSESC13: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error checking esc13 cert template PostADCSESC13: %v", err)) } else if !isValid { continue } else if groupNodes, err := getCertTemplateGroupLinks(template, tx); err != nil { - log.Errorf(fmt.Sprintf("Error getting cert template group links: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting cert template group links: %v", err)) } else if len(groupNodes) == 0 { continue } else { diff --git a/packages/go/analysis/ad/esc3.go b/packages/go/analysis/ad/esc3.go index d06cce81ad..49668172ba 100644 --- a/packages/go/analysis/ad/esc3.go +++ b/packages/go/analysis/ad/esc3.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "slices" "sync" @@ -68,7 +69,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi ) })); err != nil { if !graph.IsErrNotFound(err) { - log.Errorf(fmt.Sprintf("Error getting target nodes for esc3 for node %d: %v", certTemplateTwo.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting target nodes for esc3 for node %d: %v", certTemplateTwo.ID, err)) } } else { for _, certTemplateOne := range inboundTemplates { @@ -83,12 +84,12 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi ) if publishedECAs, err := FetchCertTemplateCAs(tx, certTemplateOne); err != nil { - log.Errorf(fmt.Sprintf("Error getting cas for cert template %d: %v", certTemplateOne.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting cas for cert template %d: %v", certTemplateOne.ID, err)) } else if publishedECAs.Len() == 0 { continue } else if eARestrictions { if delegatedAgents, err := fetchFirstDegreeNodes(tx, certTemplateTwo, ad.DelegatedEnrollmentAgent); err != nil { - 
log.Errorf(fmt.Sprintf("Error getting delegated agents for cert template %d: %v", certTemplateTwo.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting delegated agents for cert template %d: %v", certTemplateTwo.ID, err)) } else { for _, eca1 := range publishedECAs { tempResults := CalculateCrossProductNodeSets(tx, @@ -102,7 +103,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi // Add principals to result set unless it's a user and DNS is required if filteredResults, err := filterUserDNSResults(tx, tempResults, certTemplateOne); err != nil { - log.Errorf(fmt.Sprintf("Error filtering user dns results: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error filtering user dns results: %v", err)) } else { results.Or(filteredResults) } @@ -119,7 +120,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi ecaEnrollersTwo) if filteredResults, err := filterUserDNSResults(tx, tempResults, certTemplateOne); err != nil { - log.Errorf(fmt.Sprintf("Error filtering user dns results: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error filtering user dns results: %v", err)) } else { results.Or(filteredResults) } @@ -149,7 +150,7 @@ func PostEnrollOnBehalfOf(domains, enterpriseCertAuthorities, certTemplates []*g if version, err := node.Properties.Get(ad.SchemaVersion.String()).Float64(); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Did not get schema version for cert template %d: %v", node.ID, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error getting schema version for cert template %d: %v", node.ID, err)) + slog.Error(fmt.Sprintf("Error getting schema version for cert template %d: %v", node.ID, err)) } else if version == 1 { versionOneTemplates = append(versionOneTemplates, node) } else if version >= 2 { @@ -210,13 +211,13 @@ func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, p if hasBadEku, err := certTemplateHasEku(certTemplateOne, 
EkuAnyPurpose); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) + slog.Error(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if hasBadEku { continue } else if hasEku, err := certTemplateHasEku(certTemplateOne, EkuCertRequestAgent); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) + slog.Error(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if !hasEku { continue } else { @@ -224,15 +225,15 @@ func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, p if certTemplateOne.ID == certTemplateTwo.ID { continue } else if authorizedSignatures, err := certTemplateTwo.Properties.Get(ad.AuthorizedSignatures.String()).Float64(); err != nil { - log.Errorf(fmt.Sprintf("Error getting authorized signatures for cert template %d: %v", certTemplateTwo.ID, err)) + slog.Error(fmt.Sprintf("Error getting authorized signatures for cert template %d: %v", certTemplateTwo.ID, err)) } else if authorizedSignatures < 1 { continue } else if applicationPolicies, err := certTemplateTwo.Properties.Get(ad.ApplicationPolicies.String()).StringSlice(); err != nil { - log.Errorf(fmt.Sprintf("Error getting application policies for cert template %d: %v", certTemplateTwo.ID, err)) + slog.Error(fmt.Sprintf("Error getting application policies for cert template %d: %v", certTemplateTwo.ID, err)) } else if !slices.Contains(applicationPolicies, EkuCertRequestAgent) { continue } else if isLinked, err := DoesCertTemplateLinkToDomain(tx, 
certTemplateTwo, domainNode); err != nil { - log.Errorf(fmt.Sprintf("Error fetch paths from cert template %d to domain: %v", certTemplateTwo.ID, err)) + slog.Error(fmt.Sprintf("Error fetch paths from cert template %d to domain: %v", certTemplateTwo.ID, err)) } else if !isLinked { continue } else { @@ -273,13 +274,13 @@ func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates [] if hasEku, err := certTemplateHasEkuOrAll(certTemplateOne, EkuCertRequestAgent, EkuAnyPurpose); errors.Is(err, graph.ErrPropertyNotFound) { log.Warnf(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) } else if err != nil { - log.Errorf(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) + slog.Error(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) } else if !hasEku { continue } else { for _, certTemplateTwo := range versionOneCertTemplates { if hasPath, err := DoesCertTemplateLinkToDomain(tx, certTemplateTwo, domainNode); err != nil { - log.Errorf(fmt.Sprintf("Error getting domain node for certtemplate %d: %v", certTemplateTwo.ID, err)) + slog.Error(fmt.Sprintf("Error getting domain node for certtemplate %d: %v", certTemplateTwo.ID, err)) } else if !hasPath { continue } else { @@ -298,16 +299,16 @@ func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates [] func isStartCertTemplateValidESC3(template *graph.Node) bool { if reqManagerApproval, err := template.Properties.Get(ad.RequiresManagerApproval.String()).Bool(); err != nil { - log.Errorf(fmt.Sprintf("Error getting reqmanagerapproval for certtemplate %d: %v", template.ID, err)) + slog.Error(fmt.Sprintf("Error getting reqmanagerapproval for certtemplate %d: %v", template.ID, err)) } else if reqManagerApproval { return false } else if schemaVersion, err := template.Properties.Get(ad.SchemaVersion.String()).Float64(); err != nil { - log.Errorf(fmt.Sprintf("Error getting schemaversion for 
certtemplate %d: %v", template.ID, err)) + slog.Error(fmt.Sprintf("Error getting schemaversion for certtemplate %d: %v", template.ID, err)) } else if schemaVersion == 1 { return true } else if schemaVersion > 1 { if authorizedSignatures, err := template.Properties.Get(ad.AuthorizedSignatures.String()).Float64(); err != nil { - log.Errorf(fmt.Sprintf("Error getting authorizedsignatures for certtemplate %d: %v", template.ID, err)) + slog.Error(fmt.Sprintf("Error getting authorizedsignatures for certtemplate %d: %v", template.ID, err)) } else if authorizedSignatures > 0 { return false } else { @@ -323,7 +324,7 @@ func isEndCertTemplateValidESC3(template *graph.Node) bool { log.Warnf(fmt.Sprintf("Did not getting authenabled for cert template %d: %v", template.ID, err)) return false } else if err != nil { - log.Errorf(fmt.Sprintf("Error getting authenabled for cert template %d: %v", template.ID, err)) + slog.Error(fmt.Sprintf("Error getting authenabled for cert template %d: %v", template.ID, err)) return false } else if !authEnabled { return false @@ -331,7 +332,7 @@ func isEndCertTemplateValidESC3(template *graph.Node) bool { log.Warnf(fmt.Sprintf("Did not getting reqManagerApproval for cert template %d: %v", template.ID, err)) return false } else if err != nil { - log.Errorf(fmt.Sprintf("Error getting reqManagerApproval for cert template %d: %v", template.ID, err)) + slog.Error(fmt.Sprintf("Error getting reqManagerApproval for cert template %d: %v", template.ID, err)) return false } else if reqManagerApproval { return false @@ -618,10 +619,10 @@ func GetADCSESC3EdgeComposition(ctx context.Context, db graph.Database, edge *gr } if collected, err := eca2.Properties.Get(ad.EnrollmentAgentRestrictionsCollected.String()).Bool(); err != nil { - log.Errorf(fmt.Sprintf("Error getting enrollmentagentcollected for eca2 %d: %v", eca2.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting enrollmentagentcollected for eca2 %d: %v", eca2.ID, err)) } else if collected { if 
hasRestrictions, err := eca2.Properties.Get(ad.HasEnrollmentAgentRestrictions.String()).Bool(); err != nil { - log.Errorf(fmt.Sprintf("Error getting hasenrollmentagentrestrictions for ca %d: %v", eca2.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error getting hasenrollmentagentrestrictions for ca %d: %v", eca2.ID, err)) } else if hasRestrictions { // Verify p8 path exist diff --git a/packages/go/analysis/ad/esc_shared.go b/packages/go/analysis/ad/esc_shared.go index 91956894aa..ade6370eb3 100644 --- a/packages/go/analysis/ad/esc_shared.go +++ b/packages/go/analysis/ad/esc_shared.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "slices" "strings" @@ -178,7 +179,7 @@ func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[analysis.Create func PostGoldenCert(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, domain, enterpriseCA *graph.Node) error { if hostCAServiceComputers, err := FetchHostsCAServiceComputers(tx, enterpriseCA); err != nil { - log.Errorf(fmt.Sprintf("Error fetching host ca computer for enterprise ca %d: %v", enterpriseCA.ID, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error fetching host ca computer for enterprise ca %d: %v", enterpriseCA.ID, err)) } else { for _, computer := range hostCAServiceComputers { channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ diff --git a/packages/go/analysis/azure/application.go b/packages/go/analysis/azure/application.go index d51da4ba1c..1c5de33d9d 100644 --- a/packages/go/analysis/azure/application.go +++ b/packages/go/analysis/azure/application.go @@ -19,11 +19,11 @@ package azure import ( "context" "fmt" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) func NewApplicationDetails(node *graph.Node) ApplicationDetails { @@ -61,12 +61,12 @@ func getAppServicePrincipalID(tx 
graph.Transaction, node *graph.Node) (string, e return "", err } else if appServicePrincipals.Len() == 0 { // Don't want this to break the function, but we'll want to know about it - log.Errorf(fmt.Sprintf("Application node %d has no service principals attached", node.ID)) + slog.Error(fmt.Sprintf("Application node %d has no service principals attached", node.ID)) } else { servicePrincipal := appServicePrincipals.Pick() if servicePrincipalID, err = servicePrincipal.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", servicePrincipal.ID, node.ID, err)) + slog.Error(fmt.Sprintf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", servicePrincipal.ID, node.ID, err)) } } return servicePrincipalID, nil diff --git a/packages/go/analysis/azure/post.go b/packages/go/analysis/azure/post.go index d1fab25042..076b9bd7aa 100644 --- a/packages/go/analysis/azure/post.go +++ b/packages/go/analysis/azure/post.go @@ -19,6 +19,7 @@ package azure import ( "context" "fmt" + "log/slog" "strings" "github.com/specterops/bloodhound/analysis" @@ -240,7 +241,7 @@ func AppRoleAssignments(ctx context.Context, db graph.Database) (*analysis.Atomi return nil }); err != nil { if err := operation.Done(); err != nil { - log.Errorf(fmt.Sprintf("Error caught during azure AppRoleAssignments teardown: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error caught during azure AppRoleAssignments teardown: %v", err)) } return &operation.Stats, err @@ -723,7 +724,7 @@ func ExecuteCommand(ctx context.Context, db graph.Database) (*analysis.AtomicPos return nil }); err != nil { if err := operation.Done(); err != nil { - log.Errorf(fmt.Sprintf("Error caught during azure ExecuteCommand teardown: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error caught during azure ExecuteCommand teardown: %v", err)) 
} return &operation.Stats, err @@ -804,7 +805,7 @@ func globalAdmins(roleAssignments RoleAssignments, tenant *graph.Node, operation return nil }); err != nil { - log.Errorf(fmt.Sprintf("Failed to submit azure global admins post processing job: %v", err)) + slog.Error(fmt.Sprintf("Failed to submit azure global admins post processing job: %v", err)) } } @@ -822,7 +823,7 @@ func privilegedRoleAdmins(roleAssignments RoleAssignments, tenant *graph.Node, o return nil }); err != nil { - log.Errorf(fmt.Sprintf("Failed to submit privileged role admins post processing job: %v", err)) + slog.Error(fmt.Sprintf("Failed to submit privileged role admins post processing job: %v", err)) } } @@ -840,7 +841,7 @@ func privilegedAuthAdmins(roleAssignments RoleAssignments, tenant *graph.Node, o return nil }); err != nil { - log.Errorf(fmt.Sprintf("Failed to submit azure privileged auth admins post processing job: %v", err)) + slog.Error(fmt.Sprintf("Failed to submit azure privileged auth admins post processing job: %v", err)) } } @@ -864,7 +865,7 @@ func addMembers(roleAssignments RoleAssignments, operation analysis.StatTrackedO return nil }); err != nil { - log.Errorf(fmt.Sprintf("Failed to submit azure add members AddMemberAllGroupsTargetRoles post processing job: %v", err)) + slog.Error(fmt.Sprintf("Failed to submit azure add members AddMemberAllGroupsTargetRoles post processing job: %v", err)) } if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -888,7 +889,7 @@ func addMembers(roleAssignments RoleAssignments, operation analysis.StatTrackedO return nil }); err != nil { - log.Errorf(fmt.Sprintf("Failed to submit azure add members AddMemberGroupNotRoleAssignableTargetRoles post processing job: %v", err)) + slog.Error(fmt.Sprintf("Failed to submit azure add members AddMemberGroupNotRoleAssignableTargetRoles post processing job: %v", err)) } } } @@ -902,14 +903,14 @@ func 
UserRoleAssignments(ctx context.Context, db graph.Database) (*analysis.Atom for _, tenant := range tenantNodes { if roleAssignments, err := TenantRoleAssignments(ctx, db, tenant); err != nil { if err := operation.Done(); err != nil { - log.Errorf(fmt.Sprintf("Error caught during azure UserRoleAssignments.TenantRoleAssignments teardown: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error caught during azure UserRoleAssignments.TenantRoleAssignments teardown: %v", err)) } return &analysis.AtomicPostProcessingStats{}, err } else { if err := resetPassword(operation, tenant, roleAssignments); err != nil { if err := operation.Done(); err != nil { - log.Errorf(fmt.Sprintf("Error caught during azure UserRoleAssignments.resetPassword teardown: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error caught during azure UserRoleAssignments.resetPassword teardown: %v", err)) } return &analysis.AtomicPostProcessingStats{}, err diff --git a/packages/go/analysis/azure/queries.go b/packages/go/analysis/azure/queries.go index b5b08d4469..0a9f5dd8c4 100644 --- a/packages/go/analysis/azure/queries.go +++ b/packages/go/analysis/azure/queries.go @@ -29,7 +29,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" ) @@ -60,7 +59,7 @@ func FetchGraphDBTierZeroTaggedAssets(tx graph.Transaction, tenant *graph.Node) defer measure.LogAndMeasure(slog.LevelInfo, "FetchGraphDBTierZeroTaggedAssets", "tenant_id", tenant.ID)() if tenantObjectID, err := tenant.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) + slog.Error(fmt.Sprintf("Tenant node %d does not have a valid %s property: %v", tenant.ID, common.ObjectID, err)) return nil, err } else { if nodeSet, err := 
ops.FetchNodeSet(tx.Nodes().Filterf(func() graph.Criteria { diff --git a/packages/go/analysis/azure/service_principal.go b/packages/go/analysis/azure/service_principal.go index 7a7ede7ddd..0b48f8874f 100644 --- a/packages/go/analysis/azure/service_principal.go +++ b/packages/go/analysis/azure/service_principal.go @@ -19,6 +19,7 @@ package azure import ( "context" "fmt" + "log/slog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/azure" @@ -64,7 +65,7 @@ func getServicePrincipalAppID(tx graph.Transaction, node *graph.Node) (string, e app := servicePrincipalApps.Pick() if appID, err = app.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Errorf(fmt.Sprintf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", app.ID, node.ID, err)) + slog.Error(fmt.Sprintf("Failed to marshal the object ID of node %d while fetching the service principal ID of application node %d: %v", app.ID, node.ID, err)) } } return appID, nil diff --git a/packages/go/dawgs/drivers/neo4j/cypher.go b/packages/go/dawgs/drivers/neo4j/cypher.go index 836e11f3ce..bf6a3fc796 100644 --- a/packages/go/dawgs/drivers/neo4j/cypher.go +++ b/packages/go/dawgs/drivers/neo4j/cypher.go @@ -19,13 +19,13 @@ package neo4j import ( "bytes" "fmt" + "log/slog" "sort" "strings" "github.com/specterops/bloodhound/cypher/frontend" "github.com/specterops/bloodhound/cypher/models/cypher/format" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" ) func newUpdateKey(identityKind graph.Kind, identityProperties []string, updateKinds graph.Kinds) string { @@ -309,9 +309,9 @@ func stripCypherQuery(rawQuery string) string { ) if queryModel, err := frontend.ParseCypher(frontend.DefaultCypherContext(), rawQuery); err != nil { - log.Errorf(fmt.Sprintf("Error occurred parsing cypher query during sanitization: %v", err)) + slog.Error(fmt.Sprintf("Error occurred parsing cypher 
query during sanitization: %v", err)) } else if err = strippedEmitter.Write(queryModel, buffer); err != nil { - log.Errorf(fmt.Sprintf("Error occurred sanitizing cypher query: %v", err)) + slog.Error(fmt.Sprintf("Error occurred sanitizing cypher query: %v", err)) } return buffer.String() diff --git a/packages/go/dawgs/drivers/neo4j/transaction.go b/packages/go/dawgs/drivers/neo4j/transaction.go index aaa85fa090..b231dfc93d 100644 --- a/packages/go/dawgs/drivers/neo4j/transaction.go +++ b/packages/go/dawgs/drivers/neo4j/transaction.go @@ -25,8 +25,6 @@ import ( "strings" "github.com/specterops/bloodhound/dawgs/drivers" - "github.com/specterops/bloodhound/log" - "github.com/specterops/bloodhound/dawgs/query/neo4j" "github.com/specterops/bloodhound/dawgs/util/size" @@ -331,7 +329,7 @@ func (s *neo4jTransaction) Raw(stmt string, params map[string]any) graph.Result prettyParameters.WriteString(":") if marshalledValue, err := json.Marshal(value); err != nil { - log.Errorf(fmt.Sprintf("Unable to marshal query parameter %s", key)) + slog.Error(fmt.Sprintf("Unable to marshal query parameter %s", key)) } else { prettyParameters.Write(marshalledValue) } diff --git a/packages/go/ein/ad.go b/packages/go/ein/ad.go index c2a50f9347..8fb497f40d 100644 --- a/packages/go/ein/ad.go +++ b/packages/go/ein/ad.go @@ -18,6 +18,7 @@ package ein import ( "fmt" + "log/slog" "strconv" "strings" @@ -195,10 +196,10 @@ func ParseACEData(aces []ACE, targetID string, targetType graph.Kind) []Ingestib } if rightKind, err := analysis.ParseKind(ace.RightName); err != nil { - log.Errorf(fmt.Sprintf("Error during ParseACEData: %v", err)) + slog.Error(fmt.Sprintf("Error during ParseACEData: %v", err)) continue } else if !ad.IsACLKind(rightKind) { - log.Errorf(fmt.Sprintf("Non-ace edge type given to process aces: %s", ace.RightName)) + slog.Error(fmt.Sprintf("Non-ace edge type given to process aces: %s", ace.RightName)) continue } else { converted = append(converted, NewIngestibleRelationship( @@ -226,7 
+227,7 @@ func convertSPNData(spns []SPNTarget, sourceID string) []IngestibleRelationship for _, s := range spns { if kind, err := analysis.ParseKind(s.Service); err != nil { - log.Errorf(fmt.Sprintf("Error during processSPNTargets: %v", err)) + slog.Error(fmt.Sprintf("Error during processSPNTargets: %v", err)) } else { converted = append(converted, NewIngestibleRelationship( IngestibleSource{ @@ -368,7 +369,7 @@ func ParseDomainTrusts(domain Domain) ParsedDomainTrustData { switch converted := trust.TrustAttributes.(type) { case string: if i, err := strconv.Atoi(converted); err != nil { - log.Errorf(fmt.Sprintf("Error converting trust attributes %s to int", converted)) + slog.Error(fmt.Sprintf("Error converting trust attributes %s to int", converted)) finalTrustAttributes = 0 } else { finalTrustAttributes = i @@ -376,7 +377,7 @@ func ParseDomainTrusts(domain Domain) ParsedDomainTrustData { case int: finalTrustAttributes = converted default: - log.Errorf(fmt.Sprintf("Error converting trust attributes %s to int", converted)) + slog.Error(fmt.Sprintf("Error converting trust attributes %s to int", converted)) finalTrustAttributes = 0 } diff --git a/packages/go/ein/azure.go b/packages/go/ein/azure.go index 754d966471..29cbcfea6a 100644 --- a/packages/go/ein/azure.go +++ b/packages/go/ein/azure.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "regexp" "slices" "strings" @@ -453,11 +454,11 @@ func ConvertAzureGroupMembersToRels(data models.GroupMembers) []IngestibleRelati member azure2.DirectoryObject ) if err := json.Unmarshal(raw.Member, &member); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure group member", err)) + slog.Error(fmt.Sprintf(SerialError, "azure group member", err)) } else if memberType, err := ExtractTypeFromDirectoryObject(member); errors.Is(err, ErrInvalidType) { log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(fmt.Sprintf(ExtractError, err)) + slog.Error(fmt.Sprintf(ExtractError, err)) } 
else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ @@ -487,11 +488,11 @@ func ConvertAzureGroupOwnerToRels(data models.GroupOwners) []IngestibleRelations owner azure2.DirectoryObject ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure group owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure group owner", err)) } else if ownerType, err := ExtractTypeFromDirectoryObject(owner); errors.Is(err, ErrInvalidType) { log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(fmt.Sprintf(ExtractError, err)) + slog.Error(fmt.Sprintf(ExtractError, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ @@ -865,7 +866,7 @@ func ConvertAzureRoleAssignmentToRels(roleAssignment azure2.UnifiedRoleAssignmen if CanAddSecret(roleAssignment.RoleDefinitionId) && roleAssignment.DirectoryScopeId != "/" { if relType, err := GetAddSecretRoleKind(roleAssignment.RoleDefinitionId); err != nil { - log.Errorf(fmt.Sprintf("Error processing role assignment for role %s: %v", roleObjectId, err)) + slog.Error(fmt.Sprintf("Error processing role assignment for role %s: %v", roleObjectId, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ @@ -1073,11 +1074,11 @@ func ConvertAzureServicePrincipalOwnerToRels(data models.ServicePrincipalOwners) ) if err := json.Unmarshal(raw.Owner, &owner); err != nil { - log.Errorf(fmt.Sprintf(SerialError, "azure service principal owner", err)) + slog.Error(fmt.Sprintf(SerialError, "azure service principal owner", err)) } else if ownerType, err := ExtractTypeFromDirectoryObject(owner); errors.Is(err, ErrInvalidType) { log.Warnf(fmt.Sprintf(ExtractError, err)) } else if err != nil { - log.Errorf(fmt.Sprintf(ExtractError, err)) + slog.Error(fmt.Sprintf(ExtractError, err)) } else { relationships = append(relationships, NewIngestibleRelationship( IngestibleSource{ diff 
--git a/packages/go/log/handlers/handlers.go b/packages/go/log/handlers/handlers.go index a3e962cacf..755fceb646 100644 --- a/packages/go/log/handlers/handlers.go +++ b/packages/go/log/handlers/handlers.go @@ -87,12 +87,12 @@ type stackFrame struct { func GetSlogCallStack() slog.Attr { var outputFrames []stackFrame - pc := make([]uintptr, 25) + pc := make([]uintptr, 25) // Arbitrarily only go to a call depth of 25 n := runtime.Callers(1, pc) if n == 0 { return slog.Attr{} } - pc = pc[:n] // pass only valid pcs to runtime.CallersFrames + pc = pc[:n] frames := runtime.CallersFrames(pc) for { diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 719b2b9473..018ed4ce18 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -39,7 +39,7 @@ func main() { } if lvl, err := log.ParseLevel(rawLvl); err != nil { - log.Errorf(fmt.Sprintf("Could not parse log level from %s: %v", environment.LogLevelVarName, err)) + slog.Error(fmt.Sprintf("Could not parse log level from %s: %v", environment.LogLevelVarName, err)) } else { log.SetGlobalLevel(lvl) } From 1aeb864f2fb6423128b89f7ef668c67cfd15e7e1 Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 12:26:28 -0500 Subject: [PATCH 11/20] BED-4153: Migrate log.Warn --- cmd/api/src/api/auth.go | 3 +-- cmd/api/src/api/marshalling.go | 7 +++--- cmd/api/src/api/middleware/compression.go | 3 +-- cmd/api/src/api/middleware/logging.go | 7 +++--- cmd/api/src/api/middleware/middleware.go | 2 +- cmd/api/src/api/v2/agi.go | 18 +++++++-------- cmd/api/src/api/v2/analysisrequest.go | 3 +-- cmd/api/src/api/v2/auth/sso.go | 6 ++--- cmd/api/src/api/v2/database_wipe.go | 5 ++-- cmd/api/src/api/v2/flag.go | 4 ++-- cmd/api/src/config/config.go | 3 +-- .../src/daemons/datapipe/azure_convertors.go | 5 ++-- cmd/api/src/daemons/datapipe/cleanup.go | 2 +- cmd/api/src/daemons/datapipe/jobs.go | 7 +++--- cmd/api/src/migrations/graph.go | 9 ++++---- 
cmd/api/src/model/appcfg/parameter.go | 22 +++++++++--------- cmd/api/src/model/samlprovider.go | 3 +-- cmd/api/src/queries/graph.go | 6 ++--- cmd/api/src/services/entrypoint.go | 3 +-- .../src/services/fileupload/file_upload.go | 3 +-- cmd/api/src/services/fileupload/validation.go | 5 ++-- .../utils/validation/duration_validator.go | 6 ++--- packages/go/analysis/ad/adcs.go | 23 +++++++++---------- packages/go/analysis/ad/adcscache.go | 13 +++++------ packages/go/analysis/ad/esc1.go | 7 +++--- packages/go/analysis/ad/esc10.go | 11 +++++---- packages/go/analysis/ad/esc13.go | 8 +++---- packages/go/analysis/ad/esc3.go | 19 ++++++++------- packages/go/analysis/ad/esc4.go | 20 ++++++++-------- packages/go/analysis/ad/esc6.go | 12 +++++----- packages/go/analysis/ad/esc9.go | 11 +++++---- packages/go/analysis/ad/esc_shared.go | 3 +-- packages/go/analysis/ad/ntlm.go | 4 ++-- packages/go/analysis/ad/post.go | 5 ++-- packages/go/analysis/azure/filters.go | 4 ++-- packages/go/analysis/azure/post.go | 2 +- .../go/analysis/azure/service_principal.go | 3 +-- packages/go/dawgs/drivers/pg/pg.go | 4 ++-- packages/go/dawgs/traversal/traversal.go | 8 +++---- packages/go/ein/azure.go | 7 +++--- .../go/stbernard/workspace/golang/build.go | 3 +-- packages/go/stbernard/workspace/yarn/yarn.go | 4 ++-- 42 files changed, 141 insertions(+), 162 deletions(-) diff --git a/cmd/api/src/api/auth.go b/cmd/api/src/api/auth.go index c06f5a4a5a..aa060ec536 100644 --- a/cmd/api/src/api/auth.go +++ b/cmd/api/src/api/auth.go @@ -37,7 +37,6 @@ import ( "github.com/golang-jwt/jwt/v4" "github.com/specterops/bloodhound/crypto" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/ctx" @@ -113,7 +112,7 @@ func (s authenticator) auditLogin(requestContext context.Context, commitID uuid. 
err := s.db.CreateAuditLog(requestContext, auditLog) if err != nil { - log.Warnf(fmt.Sprintf("failed to write login audit log %+v", err)) + slog.WarnContext(requestContext, fmt.Sprintf("failed to write login audit log %+v", err)) } } diff --git a/cmd/api/src/api/marshalling.go b/cmd/api/src/api/marshalling.go index 40d782c560..110e63eb98 100644 --- a/cmd/api/src/api/marshalling.go +++ b/cmd/api/src/api/marshalling.go @@ -27,7 +27,6 @@ import ( "time" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api/stream" "github.com/specterops/bloodhound/src/model" @@ -77,15 +76,15 @@ type ResponseWrapper struct { func WriteErrorResponse(ctx context.Context, untypedError any, response http.ResponseWriter) { switch typedError := untypedError.(type) { case *ErrorResponse: // V1 error handling - log.Warnf(fmt.Sprintf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Error)) + slog.WarnContext(ctx, fmt.Sprintf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Error)) WriteJSONResponse(context.Background(), typedError.Error, typedError.HTTPStatus, response) case *ErrorWrapper: // V2 error handling - log.Warnf(fmt.Sprintf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Errors)) + slog.WarnContext(ctx, fmt.Sprintf("Writing API Error. Status: %v. Message: %v", typedError.HTTPStatus, typedError.Errors)) WriteJSONResponse(ctx, typedError, typedError.HTTPStatus, response) default: - log.Warnf(fmt.Sprintf("Failure Writing API Error. Status: %v. Message: %v", http.StatusInternalServerError, "Invalid error format returned")) + slog.WarnContext(ctx, fmt.Sprintf("Failure Writing API Error. Status: %v. 
Message: %v", http.StatusInternalServerError, "Invalid error format returned")) WriteJSONResponse(ctx, "An internal error has occurred that is preventing the service from servicing this request.", http.StatusInternalServerError, response) } } diff --git a/cmd/api/src/api/middleware/compression.go b/cmd/api/src/api/middleware/compression.go index e78b0eb7f8..a4e49cb5db 100644 --- a/cmd/api/src/api/middleware/compression.go +++ b/cmd/api/src/api/middleware/compression.go @@ -27,7 +27,6 @@ import ( "strings" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" ) @@ -66,7 +65,7 @@ func CompressionMiddleware(next http.Handler) http.Handler { request.Body, err = wrapBody(encoding, request.Body) if err != nil { errMsg := fmt.Sprintf("failed to create reader for %s encoding: %v", encoding, err) - log.Warnf(fmt.Sprintf(errMsg)) + slog.WarnContext(request.Context(), fmt.Sprintf(errMsg)) if errors.Is(err, errUnsupportedEncoding) { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusUnsupportedMediaType, fmt.Sprintf("Error trying to read request: %s", errMsg), request), responseWriter) } else { diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index 19d4116db8..ce1ea71dbf 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -26,7 +26,6 @@ import ( "github.com/gofrs/uuid" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -149,9 +148,9 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http slog.LogAttrs(request.Context(), slog.LevelInfo, fmt.Sprintf("%s %s", request.Method, request.URL.RequestURI()), logAttrs...) 
if !deadline.IsZero() && time.Now().After(deadline) { - log.Warnf( - "%s %s took longer than the configured timeout of %d seconds", - request.Method, request.URL.RequestURI(), timeout.Seconds(), + slog.WarnContext( + request.Context(), + fmt.Sprintf("%s %s took longer than the configured timeout of %d seconds", request.Method, request.URL.RequestURI(), timeout.Seconds()), ) } }() diff --git a/cmd/api/src/api/middleware/middleware.go b/cmd/api/src/api/middleware/middleware.go index a22b744b38..67fa73cc53 100644 --- a/cmd/api/src/api/middleware/middleware.go +++ b/cmd/api/src/api/middleware/middleware.go @@ -157,7 +157,7 @@ func parseUserIP(r *http.Request) string { // The point of this code is to strip the port, so we don't need to save it. if host, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - log.Warnf(fmt.Sprintf("Error parsing remoteAddress '%s': %s", r.RemoteAddr, err)) + slog.WarnContext(r.Context(), fmt.Sprintf("Error parsing remoteAddress '%s': %s", r.RemoteAddr, err)) remoteIp = r.RemoteAddr } else { remoteIp = host diff --git a/cmd/api/src/api/v2/agi.go b/cmd/api/src/api/v2/agi.go index d3ca7fd9b2..82eae79f3c 100644 --- a/cmd/api/src/api/v2/agi.go +++ b/cmd/api/src/api/v2/agi.go @@ -19,6 +19,7 @@ package v2 import ( "errors" "fmt" + "log/slog" "net/http" "net/url" "regexp" @@ -33,7 +34,6 @@ import ( "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -271,14 +271,14 @@ func (s Resources) UpdateAssetGroupSelectors(response http.ResponseWriter, reque api.HandleDatabaseError(request, response, err) } else { if err := s.GraphQuery.UpdateSelectorTags(request.Context(), s.DB, result); err != nil { - log.Warnf(fmt.Sprintf("Failed updating asset group tags; will be retried upon next 
analysis run: %v", err)) + slog.WarnContext(request.Context(), fmt.Sprintf("Failed updating asset group tags; will be retried upon next analysis run: %v", err)) } if assetGroup.Tag == model.TierZeroAssetGroupTag { // When T0 asset group selectors are modified, entire analysis must be re-run var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-update-asset-group-selectors" } else { userId = user.ID.String() @@ -484,7 +484,7 @@ func parseAGMembersFromNodes(nodes graph.NodeSet, selectors model.AssetGroupSele // a member is custom if at least one selector exists for that object ID for _, agSelector := range selectors { if objectId, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Objectid is missing for node %d", node.ID)) + slog.Warn(fmt.Sprintf("Objectid is missing for node %d", node.ID)) } else if agSelector.Selector == objectId { isCustomMember = true } @@ -496,14 +496,14 @@ func parseAGMembersFromNodes(nodes graph.NodeSet, selectors model.AssetGroupSele ) if objectId, err := node.Properties.Get(common.ObjectID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Objectid is missing for node %d", node.ID)) + slog.Warn(fmt.Sprintf("Objectid is missing for node %d", node.ID)) memberObjectId = "" } else { memberObjectId = objectId } if name, err := node.Properties.Get(common.Name.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Name is missing for node %d", node.ID)) + slog.Warn(fmt.Sprintf("Name is missing for node %d", node.ID)) memberName = "" } else { memberName = name @@ -520,20 +520,20 @@ func parseAGMembersFromNodes(nodes graph.NodeSet, selectors model.AssetGroupSele if 
node.Kinds.ContainsOneOf(azure.Entity) { if tenantID, err := node.Properties.Get(azure.TenantID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("%s is missing for node %d", azure.TenantID.String(), node.ID)) + slog.Warn(fmt.Sprintf("%s is missing for node %d", azure.TenantID.String(), node.ID)) } else { agMember.EnvironmentKind = azure.Tenant.String() agMember.EnvironmentID = tenantID } } else if node.Kinds.ContainsOneOf(ad.Entity) { if domainSID, err := node.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("%s is missing for node %d", ad.DomainSID.String(), node.ID)) + slog.Warn(fmt.Sprintf("%s is missing for node %d", ad.DomainSID.String(), node.ID)) } else { agMember.EnvironmentKind = ad.Domain.String() agMember.EnvironmentID = domainSID } } else { - log.Warnf(fmt.Sprintf("Node %d is missing valid base entity, skipping AG Membership...", node.ID)) + slog.Warn(fmt.Sprintf("Node %d is missing valid base entity, skipping AG Membership...", node.ID)) continue } diff --git a/cmd/api/src/api/v2/analysisrequest.go b/cmd/api/src/api/v2/analysisrequest.go index ed0b448158..1e4b5e40da 100644 --- a/cmd/api/src/api/v2/analysisrequest.go +++ b/cmd/api/src/api/v2/analysisrequest.go @@ -23,7 +23,6 @@ import ( "log/slog" "net/http" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" @@ -46,7 +45,7 @@ func (s Resources) RequestAnalysis(response http.ResponseWriter, request *http.R var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/auth/sso.go 
b/cmd/api/src/api/v2/auth/sso.go index e51dbd6520..34e7dd419d 100644 --- a/cmd/api/src/api/v2/auth/sso.go +++ b/cmd/api/src/api/v2/auth/sso.go @@ -19,6 +19,7 @@ package auth import ( "context" "fmt" + "log/slog" "net/http" "net/url" "path" @@ -27,7 +28,6 @@ import ( "github.com/gorilla/mux" "github.com/specterops/bloodhound/dawgs/cardinality" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -278,9 +278,9 @@ func SanitizeAndGetRoles(ctx context.Context, autoProvisionConfig model.SSOProvi case len(validRoles) == 1: return validRoles, nil case len(validRoles) > 1: - log.Warnf(fmt.Sprintf("[SSO] JIT Role Provision detected multiple valid roles - %s , falling back to default role %s", validRoles.Names(), defaultRole.Name)) + slog.WarnContext(ctx, fmt.Sprintf("[SSO] JIT Role Provision detected multiple valid roles - %s , falling back to default role %s", validRoles.Names(), defaultRole.Name)) default: - log.Warnf(fmt.Sprintf("[SSO] JIT Role Provision detected no valid roles from %s , falling back to default role %s", maybeBHRoles, defaultRole.Name)) + slog.WarnContext(ctx, fmt.Sprintf("[SSO] JIT Role Provision detected no valid roles from %s , falling back to default role %s", maybeBHRoles, defaultRole.Name)) } } diff --git a/cmd/api/src/api/v2/database_wipe.go b/cmd/api/src/api/v2/database_wipe.go index 0f21415648..22b84aa23f 100644 --- a/cmd/api/src/api/v2/database_wipe.go +++ b/cmd/api/src/api/v2/database_wipe.go @@ -23,7 +23,6 @@ import ( "net/http" "strings" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -113,7 +112,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt } else { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - 
log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-database-wipe" } else { userId = user.ID.String() @@ -141,7 +140,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt if kickoffAnalysis { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-database-wipe" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/flag.go b/cmd/api/src/api/v2/flag.go index 4abeb09a66..b76405085c 100644 --- a/cmd/api/src/api/v2/flag.go +++ b/cmd/api/src/api/v2/flag.go @@ -18,11 +18,11 @@ package v2 import ( "fmt" + "log/slog" "net/http" "strconv" "github.com/gorilla/mux" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" @@ -64,7 +64,7 @@ func (s Resources) ToggleFlag(response http.ResponseWriter, request *http.Reques if featureFlag.Key == appcfg.FeatureAdcs && !featureFlag.Enabled { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - log.Warnf(fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) userId = "unknown-user-toggle-flag" } else { userId = user.ID.String() diff --git a/cmd/api/src/config/config.go b/cmd/api/src/config/config.go index e9d728a62f..b22eb1ce08 100644 --- a/cmd/api/src/config/config.go +++ 
b/cmd/api/src/config/config.go @@ -29,7 +29,6 @@ import ( "time" "github.com/specterops/bloodhound/crypto" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/serde" ) @@ -247,7 +246,7 @@ func SetValuesFromEnv(varPrefix string, target any, env []string) error { cfgKeyPath := strings.TrimPrefix(key, formattedPrefix) if err := SetValue(target, cfgKeyPath, valueStr); errors.Is(err, ErrInvalidConfigurationPath) { - log.Warnf(fmt.Sprintf("%s", err)) + slog.Warn(fmt.Sprintf("%s", err)) } else if err != nil { return err } diff --git a/cmd/api/src/daemons/datapipe/azure_convertors.go b/cmd/api/src/daemons/datapipe/azure_convertors.go index 78477315d2..0149c6083b 100644 --- a/cmd/api/src/daemons/datapipe/azure_convertors.go +++ b/cmd/api/src/daemons/datapipe/azure_convertors.go @@ -28,7 +28,6 @@ import ( azureModels "github.com/bloodhoundad/azurehound/v2/models/azure" "github.com/specterops/bloodhound/ein" "github.com/specterops/bloodhound/graphschema/azure" - "github.com/specterops/bloodhound/log" ) const ( @@ -193,7 +192,7 @@ func convertAzureAppOwner(raw json.RawMessage, converted *ConvertedAzureData) { if err := json.Unmarshal(raw.Owner, &owner); err != nil { slog.Error(fmt.Sprintf(SerialError, "app owner", err)) } else if ownerType, err := ein.ExtractTypeFromDirectoryObject(owner); errors.Is(err, ein.ErrInvalidType) { - log.Warnf(fmt.Sprintf(ExtractError, err)) + slog.Warn(fmt.Sprintf(ExtractError, err)) } else if err != nil { slog.Error(fmt.Sprintf(ExtractError, err)) } else { @@ -240,7 +239,7 @@ func convertAzureDeviceOwner(raw json.RawMessage, converted *ConvertedAzureData) if err := json.Unmarshal(raw.Owner, &owner); err != nil { slog.Error(fmt.Sprintf(SerialError, "device owner", err)) } else if ownerType, err := ein.ExtractTypeFromDirectoryObject(owner); errors.Is(err, ein.ErrInvalidType) { - log.Warnf(fmt.Sprintf(ExtractError, err)) + slog.Warn(fmt.Sprintf(ExtractError, err)) } else if err != nil { slog.Error(fmt.Sprintf(ExtractError, 
err)) } else { diff --git a/cmd/api/src/daemons/datapipe/cleanup.go b/cmd/api/src/daemons/datapipe/cleanup.go index 0ea3c3878f..228bfe2397 100644 --- a/cmd/api/src/daemons/datapipe/cleanup.go +++ b/cmd/api/src/daemons/datapipe/cleanup.go @@ -95,7 +95,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin if expectedDir != "" { expectedDir = strings.TrimSuffix(expectedDir, string(filepath.Separator)) if expectedDir != s.tempDirectoryRootPath { - log.Warnf(fmt.Sprintf("directory '%s' for expectedFileName '%s' does not match tempDirectoryRootPath '%s': skipping", expectedDir, expectedFileName, s.tempDirectoryRootPath)) + slog.WarnContext(ctx, fmt.Sprintf("directory '%s' for expectedFileName '%s' does not match tempDirectoryRootPath '%s': skipping", expectedDir, expectedFileName, s.tempDirectoryRootPath)) continue } } diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index 696ab1334f..49cbff5f3b 100644 --- a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -29,7 +29,6 @@ import ( "github.com/specterops/bloodhound/bomenc" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/util" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/model/appcfg" @@ -222,7 +221,7 @@ func (s *Daemon) processIngestFile(ctx context.Context, path string, fileType mo if err := file.Close(); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error closing ingest file %s: %v", filePath, err)) } else if err := os.Remove(filePath); errors.Is(err, fs.ErrNotExist) { - log.Warnf(fmt.Sprintf("Removing ingest file %s: %v", filePath, err)) + slog.WarnContext(ctx, fmt.Sprintf("Removing ingest file %s: %v", filePath, err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error removing ingest file %s: %v", filePath, err)) } @@ -249,13 +248,13 
@@ func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.Inges } if s.cfg.DisableIngest { - log.Warnf(fmt.Sprintf("Skipped processing of ingestTasks due to config flag.")) + slog.WarnContext(ctx, fmt.Sprintf("Skipped processing of ingestTasks due to config flag.")) return } total, failed, err := s.processIngestFile(ctx, ingestTask.FileName, ingestTask.FileType) if errors.Is(err, fs.ErrNotExist) { - log.Warnf(fmt.Sprintf("Did not process ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) + slog.WarnContext(ctx, fmt.Sprintf("Did not process ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed processing ingest task %d with file %s: %v", ingestTask.ID, ingestTask.FileName, err)) } else if job, err := s.db.GetFileUploadJob(ctx, ingestTask.TaskID.ValueOrZero()); err != nil { diff --git a/cmd/api/src/migrations/graph.go b/cmd/api/src/migrations/graph.go index fb633ae47b..bce35464d4 100644 --- a/cmd/api/src/migrations/graph.go +++ b/cmd/api/src/migrations/graph.go @@ -25,7 +25,6 @@ import ( "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/version" ) @@ -85,16 +84,16 @@ func GetMigrationData(ctx context.Context, db graph.Database) (version.Version, return err }); err != nil { - log.Warnf(fmt.Sprintf("Unable to fetch migration data from graph: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Unable to fetch migration data from graph: %v", err)) return currentMigration, ErrNoMigrationData } else if major, err := node.Properties.Get("Major").Int(); err != nil { - log.Warnf(fmt.Sprintf("Unable to get Major property from migration data node: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Unable to get Major property from migration data node: %v", err)) return 
currentMigration, ErrNoMigrationData } else if minor, err := node.Properties.Get("Minor").Int(); err != nil { - log.Warnf(fmt.Sprintf("unable to get Minor property from migration data node: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("unable to get Minor property from migration data node: %v", err)) return currentMigration, ErrNoMigrationData } else if patch, err := node.Properties.Get("Patch").Int(); err != nil { - log.Warnf(fmt.Sprintf("unable to get Patch property from migration data node: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("unable to get Patch property from migration data node: %v", err)) return currentMigration, ErrNoMigrationData } else { currentMigration.Major = major diff --git a/cmd/api/src/model/appcfg/parameter.go b/cmd/api/src/model/appcfg/parameter.go index 730dce676e..924fadd60f 100644 --- a/cmd/api/src/model/appcfg/parameter.go +++ b/cmd/api/src/model/appcfg/parameter.go @@ -21,12 +21,12 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "reflect" "time" iso8601 "github.com/channelmeter/iso8601duration" "github.com/specterops/bloodhound/dawgs/drivers/neo4j" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database/types" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/utils" @@ -170,10 +170,10 @@ func GetPasswordExpiration(ctx context.Context, service ParameterService) time.D var expiration PasswordExpiration if cfg, err := service.GetConfigurationParameter(ctx, PasswordExpirationWindow); err != nil { - log.Warnf(fmt.Sprintf("Failed to fetch password expiratio configuration; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch password expiratio configuration; returning default values")) return DefaultPasswordExpirationWindow } else if err := cfg.Map(&expiration); err != nil { - log.Warnf(fmt.Sprintf("Invalid password expiration configuration supplied; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Invalid password 
expiration configuration supplied; returning default values")) return DefaultPasswordExpirationWindow } @@ -194,9 +194,9 @@ func GetNeo4jParameters(ctx context.Context, service ParameterService) Neo4jPara } if neo4jParametersCfg, err := service.GetConfigurationParameter(ctx, Neo4jConfigs); err != nil { - log.Warnf(fmt.Sprintf("Failed to fetch neo4j configuration; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch neo4j configuration; returning default values")) } else if err = neo4jParametersCfg.Map(&result); err != nil { - log.Warnf(fmt.Sprintf("Invalid neo4j configuration supplied; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Invalid neo4j configuration supplied; returning default values")) } return result @@ -212,9 +212,9 @@ func GetCitrixRDPSupport(ctx context.Context, service ParameterService) bool { var result CitrixRDPSupport if cfg, err := service.GetConfigurationParameter(ctx, CitrixRDPSupportKey); err != nil { - log.Warnf(fmt.Sprintf("Failed to fetch CitrixRDPSupport configuration; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch CitrixRDPSupport configuration; returning default values")) } else if err := cfg.Map(&result); err != nil { - log.Warnf(fmt.Sprintf("Invalid CitrixRDPSupport configuration supplied, %v. returning default values.", err)) + slog.WarnContext(ctx, fmt.Sprintf("Invalid CitrixRDPSupport configuration supplied, %v. 
returning default values.", err)) } return result.Enabled @@ -260,9 +260,9 @@ func GetPruneTTLParameters(ctx context.Context, service ParameterService) PruneT } if pruneTTLParametersCfg, err := service.GetConfigurationParameter(ctx, PruneTTL); err != nil { - log.Warnf(fmt.Sprintf("Failed to fetch prune TTL configuration; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch prune TTL configuration; returning default values")) } else if err = pruneTTLParametersCfg.Map(&result); err != nil { - log.Warnf(fmt.Sprintf("Invalid prune TTL configuration supplied; returning default values %+v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Invalid prune TTL configuration supplied; returning default values %+v", err)) } return result @@ -278,9 +278,9 @@ func GetReconciliationParameter(ctx context.Context, service ParameterService) b result := ReconciliationParameter{Enabled: true} if cfg, err := service.GetConfigurationParameter(ctx, ReconciliationKey); err != nil { - log.Warnf(fmt.Sprintf("Failed to fetch reconciliation configuration; returning default values")) + slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch reconciliation configuration; returning default values")) } else if err := cfg.Map(&result); err != nil { - log.Warnf(fmt.Sprintf("Invalid reconciliation configuration supplied, %v. returning default values.", err)) + slog.WarnContext(ctx, fmt.Sprintf("Invalid reconciliation configuration supplied, %v. 
returning default values.", err)) } return result.Enabled diff --git a/cmd/api/src/model/samlprovider.go b/cmd/api/src/model/samlprovider.go index 5c95e6ddfa..d28e20521e 100644 --- a/cmd/api/src/model/samlprovider.go +++ b/cmd/api/src/model/samlprovider.go @@ -24,7 +24,6 @@ import ( "path" "github.com/crewjam/saml" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/database/types/null" "github.com/specterops/bloodhound/src/serde" ) @@ -141,7 +140,7 @@ func assertionFindString(assertion *saml.Assertion, names ...string) (string, er return value.Value, nil } } - log.Warnf(fmt.Sprintf("[SAML] Found attribute values for attribute %s however none of the values have an XML type of %s. Choosing the first value.", ObjectIDAttributeNameFormat, XMLTypeString)) + slog.Warn(fmt.Sprintf("[SAML] Found attribute values for attribute %s however none of the values have an XML type of %s. Choosing the first value.", ObjectIDAttributeNameFormat, XMLTypeString)) return attribute.Values[0].Value, nil } } diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index 3db84ca3b1..caed3071fd 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -493,7 +493,7 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i timeoutLog.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) timeoutLog.Msg("Neo4j timed out while executing cypher query") } else { - log.Warnf(fmt.Sprintf("RawCypherQuery failed: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("RawCypherQuery failed: %v", err)) } return graphResponse, err } @@ -636,7 +636,7 @@ func (s *GraphQuery) GetEntityCountResults(ctx context.Context, node *graph.Node defer waitGroup.Done() if result, err := runEntityQuery(ctx, s.Graph, delegate, node, 0, 0); errors.Is(err, graph.ErrContextTimedOut) { - log.Warnf(fmt.Sprintf("Running entity query for key %s: %v", delegateKey, err)) + slog.WarnContext(ctx, fmt.Sprintf("Running entity query for key 
%s: %v", delegateKey, err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error running entity query for key %s: %v", delegateKey, err)) data.Store(delegateKey, 0) @@ -788,7 +788,7 @@ func (s *GraphQuery) cacheQueryResult(queryStart time.Time, cacheKey string, res if set, sizeInBytes, err := s.Cache.GuardedSet(cacheKey, result); err != nil { slog.Error(fmt.Sprintf("[Entity Results Cache] Failed to write results to cache for key: %s", cacheKey)) } else if !set { - log.Warnf(fmt.Sprintf("[Entity Results Cache] Cache entry for query %s not set because it already exists", cacheKey)) + slog.Warn(fmt.Sprintf("[Entity Results Cache] Cache entry for query %s not set because it already exists", cacheKey)) } else { slog.Info(fmt.Sprintf("[Entity Results Cache] Cached slow query %s (%d bytes) because it took %dms", cacheKey, sizeInBytes, queryTime)) } diff --git a/cmd/api/src/services/entrypoint.go b/cmd/api/src/services/entrypoint.go index 49e8403fb5..5607d2ac45 100644 --- a/cmd/api/src/services/entrypoint.go +++ b/cmd/api/src/services/entrypoint.go @@ -25,7 +25,6 @@ import ( "github.com/specterops/bloodhound/cache" "github.com/specterops/bloodhound/dawgs/graph" schema "github.com/specterops/bloodhound/graphschema" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/api/registration" "github.com/specterops/bloodhound/src/api/router" @@ -113,7 +112,7 @@ func Entrypoint(ctx context.Context, cfg config.Configuration, connections boots // Trigger analysis on first start if err := connections.RDMS.RequestAnalysis(ctx, "init"); err != nil { - log.Warnf(fmt.Sprintf("failed to request init analysis: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to request init analysis: %v", err)) } return []daemons.Daemon{ diff --git a/cmd/api/src/services/fileupload/file_upload.go b/cmd/api/src/services/fileupload/file_upload.go index 381f2f1485..eecef5c2b4 100644 --- 
a/cmd/api/src/services/fileupload/file_upload.go +++ b/cmd/api/src/services/fileupload/file_upload.go @@ -29,7 +29,6 @@ import ( "github.com/specterops/bloodhound/bomenc" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/model/ingest" @@ -68,7 +67,7 @@ func ProcessStaleFileUploadJobs(ctx context.Context, db FileUploadData) { } else { for _, job := range jobs { if job.LastIngest.Before(threshold) { - log.Warnf(fmt.Sprintf("Ingest timeout: No ingest activity observed for Job ID %d in %f minutes (last ingest was %s)). Upload incomplete", + slog.WarnContext(ctx, fmt.Sprintf("Ingest timeout: No ingest activity observed for Job ID %d in %f minutes (last ingest was %s)). Upload incomplete", job.ID, now.Sub(threshold).Minutes(), job.LastIngest.Format(time.RFC3339))) diff --git a/cmd/api/src/services/fileupload/validation.go b/cmd/api/src/services/fileupload/validation.go index e354accdb8..e261ea13c5 100644 --- a/cmd/api/src/services/fileupload/validation.go +++ b/cmd/api/src/services/fileupload/validation.go @@ -19,10 +19,9 @@ package fileupload import ( "encoding/json" "errors" - "fmt" "io" + "log/slog" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/model/ingest" ) @@ -73,7 +72,7 @@ func ValidateMetaTag(reader io.Reader, readToEnd bool) (ingest.Metadata, error) case string: if !metaTagFound && depth == 1 && typed == "meta" { if err := decoder.Decode(&meta); err != nil { - log.Warnf(fmt.Sprintf("Found invalid metatag, skipping")) + slog.Warn("Found invalid metatag, skipping") } else if meta.Type.IsValid() { metaTagFound = true } diff --git a/cmd/api/src/utils/validation/duration_validator.go b/cmd/api/src/utils/validation/duration_validator.go index c15a4d6bc9..1dcae40db9 100644 --- a/cmd/api/src/utils/validation/duration_validator.go +++ 
b/cmd/api/src/utils/validation/duration_validator.go @@ -18,10 +18,10 @@ package validation import ( "fmt" + "log/slog" "time" iso8601 "github.com/channelmeter/iso8601duration" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/utils" ) @@ -42,7 +42,7 @@ func NewDurationValidator(params map[string]string) Validator { if minD, ok := params["min"]; ok { validator.min = params["min"] if duration, err := iso8601.FromString(minD); err != nil { - log.Warnf(fmt.Sprintf("NewDurationValidator invalid min limit provided %s", minD)) + slog.Warn(fmt.Sprintf("NewDurationValidator invalid min limit provided %s", minD)) } else { validator.minD = duration.ToDuration() } @@ -51,7 +51,7 @@ func NewDurationValidator(params map[string]string) Validator { if maxD, ok := params["max"]; ok { validator.max = params["max"] if duration, err := iso8601.FromString(maxD); err != nil { - log.Warnf(fmt.Sprintf("NewDurationValidator invalid max limit provided %s", maxD)) + slog.Warn(fmt.Sprintf("NewDurationValidator invalid max limit provided %s", maxD)) } else { validator.maxD = duration.ToDuration() } diff --git a/packages/go/analysis/ad/adcs.go b/packages/go/analysis/ad/adcs.go index 63f029ecc5..d08cb1479d 100644 --- a/packages/go/analysis/ad/adcs.go +++ b/packages/go/analysis/ad/adcs.go @@ -26,7 +26,6 @@ import ( "github.com/specterops/bloodhound/analysis/impact" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) var ( @@ -116,7 +115,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostGoldenCert(ctx, tx, outC, domain, enterpriseCA); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.GoldenCert.String(), err)) + slog.WarnContext(ctx, 
fmt.Sprintf("Post processing for %s: %v", ad.GoldenCert.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.GoldenCert.String(), err)) } @@ -125,7 +124,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC1(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC1.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC1.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC1.String(), err)) } @@ -134,7 +133,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC3(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC3.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC3.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC3.String(), err)) } @@ -143,7 +142,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC4(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC4.String(), err)) + 
slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC4.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC4.String(), err)) } @@ -152,7 +151,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC6a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6a.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6a.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6a.String(), err)) } @@ -161,7 +160,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC6b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6b.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6b.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6b.String(), err)) } @@ -170,7 +169,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC9a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", 
ad.ADCSESC9a.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9a.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9a.String(), err)) } @@ -179,7 +178,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC9b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9b.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9b.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9b.String(), err)) } @@ -188,7 +187,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC10a(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10a.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10a.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10a.String(), err)) } @@ -197,7 +196,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC10b(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post 
processing for %s: %v", ad.ADCSESC10b.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10b.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10b.String(), err)) } @@ -206,7 +205,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA, domain *graph.N operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostADCSESC13(ctx, tx, outC, groupExpansions, enterpriseCA, domain, cache); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC13.String(), err)) + slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC13.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC13.String(), err)) } diff --git a/packages/go/analysis/ad/adcscache.go b/packages/go/analysis/ad/adcscache.go index 6b9efb6e03..05e2457957 100644 --- a/packages/go/analysis/ad/adcscache.go +++ b/packages/go/analysis/ad/adcscache.go @@ -26,7 +26,6 @@ import ( "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/ein" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) type ADCSCache struct { @@ -77,7 +76,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris // Check if Auth. 
Users or Everyone has enroll if domainsid, err := ct.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for certtemplate %d: %v", ct.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for certtemplate %d: %v", ct.ID, err)) } else if authUsersOrEveryoneHasEnroll, err := containsAuthUsersOrEveryone(tx, firstDegreePrincipals.Slice(), domainsid); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error fetching if auth. users or everyone has enroll on certtemplate %d: %v", ct.ID, err)) } else { @@ -102,7 +101,7 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris // Check if Auth. Users or Everyone has enroll if domainsid, err := eca.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for eca %d: %v", eca.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for eca %d: %v", eca.ID, err)) } else if authUsersOrEveryoneHasEnroll, err := containsAuthUsersOrEveryone(tx, firstDegreeEnrollers.Slice(), domainsid); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error fetching if auth. 
users or everyone has enroll on enterprise ca %d: %v", eca.ID, err)) } else { @@ -129,13 +128,13 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris // Check for weak cert config on DCs if upnMapping, err := hasUPNCertMappingInForest(tx, domain); err != nil { - log.Warnf(fmt.Sprintf("Error checking hasUPNCertMappingInForest for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error checking hasUPNCertMappingInForest for domain %d: %v", domain.ID, err)) return nil } else if upnMapping { s.hasUPNCertMappingInForest.Add(domain.ID.Uint64()) } if weakCertBinding, err := hasWeakCertBindingInForest(tx, domain); err != nil { - log.Warnf(fmt.Sprintf("Error checking hasWeakCertBindingInForest for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error checking hasWeakCertBindingInForest for domain %d: %v", domain.ID, err)) return nil } else if weakCertBinding { s.hasWeakCertBindingInForest.Add(domain.ID.Uint64()) @@ -250,7 +249,7 @@ func hasUPNCertMappingInForest(tx graph.Transaction, domain *graph.Node) (bool, } else { for _, trustedByDomain := range trustedByNodes { if dcForNodes, err := FetchNodesWithDCForEdge(tx, trustedByDomain); err != nil { - log.Warnf(fmt.Sprintf("unable to fetch DCFor nodes in hasUPNCertMappingInForest: %v", err)) + slog.Warn(fmt.Sprintf("unable to fetch DCFor nodes in hasUPNCertMappingInForest: %v", err)) continue } else { for _, dcForNode := range dcForNodes { @@ -275,7 +274,7 @@ func hasWeakCertBindingInForest(tx graph.Transaction, domain *graph.Node) (bool, } else { for _, trustedByDomain := range trustedByNodes { if dcForNodes, err := FetchNodesWithDCForEdge(tx, trustedByDomain); err != nil { - log.Warnf(fmt.Sprintf("unable to fetch DCFor nodes in hasWeakCertBindingInForest: %v", err)) + slog.Warn(fmt.Sprintf("unable to fetch DCFor nodes in hasWeakCertBindingInForest: %v", err)) continue } else { for _, dcForNode := range dcForNodes { diff --git 
a/packages/go/analysis/ad/esc1.go b/packages/go/analysis/ad/esc1.go index 605a43d6e9..a249d6771c 100644 --- a/packages/go/analysis/ad/esc1.go +++ b/packages/go/analysis/ad/esc1.go @@ -31,7 +31,6 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, expandedGroups impact.PathAggregator, enterpriseCA, domain *graph.Node, cache ADCSCache) error { @@ -42,12 +41,12 @@ func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysi ecaEnrollers := cache.GetEnterpriseCAEnrollers(enterpriseCA.ID) for _, certTemplate := range publishedCertTemplates { if valid, err := isCertTemplateValidForEsc1(certTemplate); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) continue } else if !valid { continue } else if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) continue } else { results.Or(CalculateCrossProductNodeSets(tx, domainsid, expandedGroups, cache.GetCertTemplateEnrollers(certTemplate.ID), ecaEnrollers)) @@ -203,7 +202,7 @@ func GetADCSESC1EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. 
Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc10.go b/packages/go/analysis/ad/esc10.go index 9c3d4e15ba..9cadb50dc5 100644 --- a/packages/go/analysis/ad/esc10.go +++ b/packages/go/analysis/ad/esc10.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/analysis" @@ -46,7 +47,7 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC10(template, false); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue @@ -57,10 +58,10 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if filteredVictims, err := filterUserDNSResults(tx, victimBitmap, template); err != nil { - log.Warnf(fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) continue } else if attackers, err := FetchAttackersForEscalations9and10(tx, filteredVictims, false); err != nil { - log.Warnf(fmt.Sprintf("Error getting start nodes for esc10a attacker nodes: %v", 
err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting start nodes for esc10a attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) @@ -92,7 +93,7 @@ func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analy for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC10(template, true); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue @@ -103,7 +104,7 @@ func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analy victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(enterpriseCA.ID)) if attackers, err := FetchAttackersForEscalations9and10(tx, victimBitmap, true); err != nil { - log.Warnf(fmt.Sprintf("Error getting start nodes for esc10b attacker nodes: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting start nodes for esc10b attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) diff --git a/packages/go/analysis/ad/esc13.go b/packages/go/analysis/ad/esc13.go index 9e2771823c..860d918d68 100644 --- a/packages/go/analysis/ad/esc13.go +++ b/packages/go/analysis/ad/esc13.go @@ -37,7 +37,7 @@ import ( func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca, domain *graph.Node, cache ADCSCache) error { if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if publishedCertTemplates 
:= cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { return nil @@ -45,7 +45,7 @@ func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analys ecaEnrollers := cache.GetEnterpriseCAEnrollers(eca.ID) for _, template := range publishedCertTemplates { if isValid, err := isCertTemplateValidForESC13(template); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Checking esc13 cert template PostADCSESC13: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Checking esc13 cert template PostADCSESC13: %v", err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error checking esc13 cert template PostADCSESC13: %v", err)) } else if !isValid { @@ -57,7 +57,7 @@ func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analys } else { controlBitmap := CalculateCrossProductNodeSets(tx, domainsid, groupExpansions, ecaEnrollers, cache.GetCertTemplateEnrollers(template.ID)) if filtered, err := filterUserDNSResults(tx, controlBitmap, template); err != nil { - log.Warnf(fmt.Sprintf("Error filtering users from victims for esc13: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Error filtering users from victims for esc13: %v", err)) continue } else { for _, group := range groupNodes.Slice() { @@ -225,7 +225,7 @@ func GetADCSESC13EdgeComposition(ctx context.Context, db graph.Database, edge *g // Add startnode, Auth. 
Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc3.go b/packages/go/analysis/ad/esc3.go index 49668172ba..35f541d5e4 100644 --- a/packages/go/analysis/ad/esc3.go +++ b/packages/go/analysis/ad/esc3.go @@ -33,13 +33,12 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca2, domain *graph.Node, cache ADCSCache) error { results := cardinality.NewBitmap64() if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if publishedCertTemplates := cache.GetPublishedTemplateCache(eca2.ID); len(publishedCertTemplates) == 0 { return nil @@ -148,7 +147,7 @@ func PostEnrollOnBehalfOf(domains, enterpriseCertAuthorities, certTemplates []*g versionTwoTemplates := make([]*graph.Node, 0) for _, node := range certTemplates { if version, err := node.Properties.Get(ad.SchemaVersion.String()).Float64(); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Did not get schema version for cert template %d: %v", node.ID, err)) + slog.Warn(fmt.Sprintf("Did not get schema 
version for cert template %d: %v", node.ID, err)) } else if err != nil { slog.Error(fmt.Sprintf("Error getting schema version for cert template %d: %v", node.ID, err)) } else if version == 1 { @@ -156,7 +155,7 @@ func PostEnrollOnBehalfOf(domains, enterpriseCertAuthorities, certTemplates []*g } else if version >= 2 { versionTwoTemplates = append(versionTwoTemplates, node) } else { - log.Warnf(fmt.Sprintf("Got cert template %d with an invalid version %d", node.ID, version)) + slog.Warn(fmt.Sprintf("Got cert template %d with an invalid version %d", node.ID, version)) } } @@ -209,13 +208,13 @@ func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, p results := make([]analysis.CreatePostRelationshipJob, 0) for _, certTemplateOne := range publishedTemplates { if hasBadEku, err := certTemplateHasEku(certTemplateOne, EkuAnyPurpose); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) + slog.Warn(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if err != nil { slog.Error(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if hasBadEku { continue } else if hasEku, err := certTemplateHasEku(certTemplateOne, EkuCertRequestAgent); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) + slog.Warn(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if err != nil { slog.Error(fmt.Sprintf("Error getting EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) } else if !hasEku { @@ -272,7 +271,7 @@ func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates [] for _, certTemplateOne := range publishedTemplates { //prefilter as much as we can first if hasEku, err := 
certTemplateHasEkuOrAll(certTemplateOne, EkuCertRequestAgent, EkuAnyPurpose); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) + slog.Warn(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) } else if err != nil { slog.Error(fmt.Sprintf("Error checking ekus for certtemplate %d: %v", certTemplateOne.ID, err)) } else if !hasEku { @@ -321,7 +320,7 @@ func isStartCertTemplateValidESC3(template *graph.Node) bool { func isEndCertTemplateValidESC3(template *graph.Node) bool { if authEnabled, err := template.Properties.Get(ad.AuthenticationEnabled.String()).Bool(); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Did not getting authenabled for cert template %d: %v", template.ID, err)) + slog.Warn(fmt.Sprintf("Did not getting authenabled for cert template %d: %v", template.ID, err)) return false } else if err != nil { slog.Error(fmt.Sprintf("Error getting authenabled for cert template %d: %v", template.ID, err)) @@ -329,7 +328,7 @@ func isEndCertTemplateValidESC3(template *graph.Node) bool { } else if !authEnabled { return false } else if reqManagerApproval, err := template.Properties.Get(ad.RequiresManagerApproval.String()).Bool(); errors.Is(err, graph.ErrPropertyNotFound) { - log.Warnf(fmt.Sprintf("Did not getting reqManagerApproval for cert template %d: %v", template.ID, err)) + slog.Warn(fmt.Sprintf("Did not getting reqManagerApproval for cert template %d: %v", template.ID, err)) return false } else if err != nil { slog.Error(fmt.Sprintf("Error getting reqManagerApproval for cert template %d: %v", template.ID, err)) @@ -434,7 +433,7 @@ func GetADCSESC3EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. 
Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc4.go b/packages/go/analysis/ad/esc4.go index a3657f6491..1000978988 100644 --- a/packages/go/analysis/ad/esc4.go +++ b/packages/go/analysis/ad/esc4.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/analysis" @@ -30,7 +31,6 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA, domain *graph.Node, cache ADCSCache) error { @@ -39,24 +39,24 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi publishedTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID) domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String() if err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } // 2. 
iterate certtemplates that have an outbound `PublishedTo` edge to eca for _, certTemplate := range publishedTemplates { if principalsWithGenericWrite, err := FetchPrincipalsWithGenericWriteOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf(fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.GenericWrite, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.GenericWrite, err)) } else if principalsWithEnrollOrAllExtendedRights, err := FetchPrincipalsWithEnrollOrAllExtendedRightsOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf(fmt.Sprintf("Error fetching principals with %s or %s on cert template: %v", ad.Enroll, ad.AllExtendedRights, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error fetching principals with %s or %s on cert template: %v", ad.Enroll, ad.AllExtendedRights, err)) } else if principalsWithPKINameFlag, err := FetchPrincipalsWithWritePKINameFlagOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf(fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.WritePKINameFlag, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.WritePKINameFlag, err)) } else if principalsWithPKIEnrollmentFlag, err := FetchPrincipalsWithWritePKIEnrollmentFlagOnCertTemplate(tx, certTemplate); err != nil { - log.Warnf(fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.WritePKIEnrollmentFlag, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error fetching principals with %s on cert template: %v", ad.WritePKIEnrollmentFlag, err)) } else if enrolleeSuppliesSubject, err := certTemplate.Properties.Get(string(ad.EnrolleeSuppliesSubject)).Bool(); err != nil { - log.Warnf(fmt.Sprintf("Error fetching %s property on cert template: %v", ad.EnrolleeSuppliesSubject, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error fetching %s property on cert template: %v", ad.EnrolleeSuppliesSubject, err)) } else if 
requiresManagerApproval, err := certTemplate.Properties.Get(string(ad.RequiresManagerApproval)).Bool(); err != nil { - log.Warnf(fmt.Sprintf("Error fetching %s property on cert template: %v", ad.RequiresManagerApproval, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error fetching %s property on cert template: %v", ad.RequiresManagerApproval, err)) } else { var ( @@ -86,7 +86,7 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi // 2c. kick out early if cert template does meet conditions for ESC4 if valid, err := isCertTemplateValidForESC4(certTemplate); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", certTemplate.ID, err)) continue } else if !valid { continue @@ -622,7 +622,7 @@ func GetADCSESC4EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc6.go b/packages/go/analysis/ad/esc6.go index eda87dcf9d..830c859cf4 100644 --- a/packages/go/analysis/ad/esc6.go +++ b/packages/go/analysis/ad/esc6.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/ein" @@ -32,12 +33,11 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC6a(ctx 
context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA, domain *graph.Node, cache ADCSCache) error { if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if isUserSpecifiesSanEnabled, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabled.String()).Bool(); err != nil { return err @@ -53,7 +53,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys ) for _, publishedCertTemplate := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC6(publishedCertTemplate, false); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", publishedCertTemplate.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", publishedCertTemplate.ID, err)) continue } else if !valid { continue @@ -81,7 +81,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA, domain *graph.Node, cache ADCSCache) error { if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil } else if isUserSpecifiesSanEnabled, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabled.String()).Bool(); err != nil { return err @@ -99,7 +99,7 @@ func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analys ) for _, publishedCertTemplate := range 
publishedCertTemplates { if valid, err := isCertTemplateValidForESC6(publishedCertTemplate, true); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", publishedCertTemplate.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", publishedCertTemplate.ID, err)) continue } else if !valid { continue @@ -277,7 +277,7 @@ func GetADCSESC6EdgeComposition(ctx context.Context, db graph.Database, edge *gr // Add startnode, Auth. Users, and Everyone to start nodes if domainsid, err := endNode.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting domain SID for domain %d: %v", endNode.ID, err)) return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if nodeSet, err := FetchAuthUsersAndEveryoneGroups(tx, domainsid); err != nil { diff --git a/packages/go/analysis/ad/esc9.go b/packages/go/analysis/ad/esc9.go index 144a0aa960..6f91387443 100644 --- a/packages/go/analysis/ad/esc9.go +++ b/packages/go/analysis/ad/esc9.go @@ -19,6 +19,7 @@ package ad import ( "context" "fmt" + "log/slog" "sync" "github.com/specterops/bloodhound/analysis" @@ -45,7 +46,7 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys } else { for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC9(template, false); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue @@ -56,10 +57,10 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), 
cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if filteredVictims, err := filterUserDNSResults(tx, victimBitmap, template); err != nil { - log.Warnf(fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) continue } else if attackers, err := FetchAttackersForEscalations9and10(tx, filteredVictims, false); err != nil { - log.Warnf(fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) @@ -91,7 +92,7 @@ func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analys } else { for _, template := range publishedCertTemplates { if valid, err := isCertTemplateValidForESC9(template, true); err != nil { - log.Warnf(fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Error validating cert template %d: %v", template.ID, err)) continue } else if !valid { continue @@ -102,7 +103,7 @@ func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analys victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if attackers, err := FetchAttackersForEscalations9and10(tx, victimBitmap, true); err != nil { - log.Warnf(fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) continue } else { results.Or(graph.NodeIDsToDuplex(attackers)) diff --git a/packages/go/analysis/ad/esc_shared.go b/packages/go/analysis/ad/esc_shared.go index ade6370eb3..142a6b61b1 100644 --- a/packages/go/analysis/ad/esc_shared.go +++ b/packages/go/analysis/ad/esc_shared.go @@ -32,7 +32,6 @@ 
import ( "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/slicesext" ) @@ -46,7 +45,7 @@ func PostTrustedForNTAuth(ctx context.Context, db graph.Database, operation anal operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if thumbprints, err := innerNode.Properties.Get(ad.CertThumbprints.String()).StringSlice(); err != nil { if strings.Contains(err.Error(), graph.ErrPropertyNotFound.Error()) { - log.Warnf(fmt.Sprintf("Unable to post-process TrustedForNTAuth edge for NTAuthStore node %d due to missing adcs data: %v", innerNode.ID, err)) + slog.WarnContext(ctx, fmt.Sprintf("Unable to post-process TrustedForNTAuth edge for NTAuthStore node %d due to missing adcs data: %v", innerNode.ID, err)) return nil } return err diff --git a/packages/go/analysis/ad/ntlm.go b/packages/go/analysis/ad/ntlm.go index 22226e77c5..c780ed509d 100644 --- a/packages/go/analysis/ad/ntlm.go +++ b/packages/go/analysis/ad/ntlm.go @@ -19,6 +19,7 @@ package ad import ( "context" "errors" + "log/slog" "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/analysis/impact" @@ -28,7 +29,6 @@ import ( "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) // PostNTLM is the initial function used to execute our NTLM analysis @@ -54,7 +54,7 @@ func PostNTLM(ctx context.Context, db graph.Database, groupExpansions impact.Pat } else if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { return PostCoerceAndRelayNTLMToSMB(tx, outC, groupExpansions, innerComputer, authenticatedUserID) }); err != nil 
{ - log.Warnf("Post processing failed for %s: %v", ad.CoerceAndRelayNTLMToSMB, err) + slog.WarnContext(ctx, fmt.Sprintf("Post processing failed for %s: %v", ad.CoerceAndRelayNTLMToSMB, err)) // Additional analysis may occur if one of our analysis errors continue } diff --git a/packages/go/analysis/ad/post.go b/packages/go/analysis/ad/post.go index 4530d33263..4ad70e60a0 100644 --- a/packages/go/analysis/ad/post.go +++ b/packages/go/analysis/ad/post.go @@ -32,7 +32,6 @@ import ( "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) func PostProcessedRelationships() []graph.Kind { @@ -186,7 +185,7 @@ func getLAPSSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions im ) if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.Warn(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil, err } else if getChangesNodes, err := ops.FetchStartNodes(getChangesQuery); err != nil { return nil, err @@ -206,7 +205,7 @@ func getDCSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions impa ) if domainsid, err := domain.Properties.Get(ad.DomainSID.String()).String(); err != nil { - log.Warnf(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) + slog.Warn(fmt.Sprintf("Error getting domain SID for domain %d: %v", domain.ID, err)) return nil, err } else if getChangesNodes, err := ops.FetchStartNodes(getChangesQuery); err != nil { return nil, err diff --git a/packages/go/analysis/azure/filters.go b/packages/go/analysis/azure/filters.go index 558dd5fb14..4a86814446 100644 --- a/packages/go/analysis/azure/filters.go +++ b/packages/go/analysis/azure/filters.go @@ -18,12 +18,12 @@ package azure import ( "fmt" + "log/slog" 
"github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/azure" - "github.com/specterops/bloodhound/log" ) func FilterEntityActiveAssignments() graph.Criteria { @@ -96,7 +96,7 @@ func roleDescentFilter(ctx *ops.TraversalContext, segment *graph.PathSegment) bo // If the group does not allow role inheritance then we do not inherit the terminal role if isRoleAssignable, err := end.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil || !isRoleAssignable { if graph.IsErrPropertyNotFound(err) { - log.Warnf(fmt.Sprintf("Node %d is missing property %s", end.ID, azure.IsAssignableToRole)) + slog.Warn(fmt.Sprintf("Node %d is missing property %s", end.ID, azure.IsAssignableToRole)) } acceptDescendent = false return false diff --git a/packages/go/analysis/azure/post.go b/packages/go/analysis/azure/post.go index 076b9bd7aa..4dfc770470 100644 --- a/packages/go/analysis/azure/post.go +++ b/packages/go/analysis/azure/post.go @@ -871,7 +871,7 @@ func addMembers(roleAssignments RoleAssignments, operation analysis.StatTrackedO if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if isRoleAssignable, err := innerGroup.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil { if graph.IsErrPropertyNotFound(err) { - log.Warnf(fmt.Sprintf("Node %d is missing property %s", innerGroup.ID, azure.IsAssignableToRole)) + slog.WarnContext(ctx, fmt.Sprintf("Node %d is missing property %s", innerGroup.ID, azure.IsAssignableToRole)) } else { return err } diff --git a/packages/go/analysis/azure/service_principal.go b/packages/go/analysis/azure/service_principal.go index 0b48f8874f..78671ad7c6 100644 --- a/packages/go/analysis/azure/service_principal.go +++ b/packages/go/analysis/azure/service_principal.go @@ -24,7 +24,6 @@ import ( 
"github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) func NewServicePrincipalEntityDetails(node *graph.Node) ServicePrincipalDetails { @@ -60,7 +59,7 @@ func getServicePrincipalAppID(tx graph.Transaction, node *graph.Node) (string, e return appID, err } else if servicePrincipalApps.Len() == 0 { // Don't want this to break the function, but we'll want to know about it - log.Warnf(fmt.Sprintf("Service principal node %d has no applications attached", node.ID)) + slog.Warn(fmt.Sprintf("Service principal node %d has no applications attached", node.ID)) } else { app := servicePrincipalApps.Pick() diff --git a/packages/go/dawgs/drivers/pg/pg.go b/packages/go/dawgs/drivers/pg/pg.go index c70a293cbb..115e316bb9 100644 --- a/packages/go/dawgs/drivers/pg/pg.go +++ b/packages/go/dawgs/drivers/pg/pg.go @@ -19,6 +19,7 @@ package pg import ( "context" "fmt" + "log/slog" "time" "github.com/jackc/pgx/v5" @@ -26,7 +27,6 @@ import ( "github.com/specterops/bloodhound/cypher/models/pgsql" "github.com/specterops/bloodhound/dawgs" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" ) const ( @@ -59,7 +59,7 @@ func afterPooledConnectionRelease(conn *pgx.Conn) bool { if _, hasType := conn.TypeMap().TypeForName(dataType.String()); !hasType { // This connection should be destroyed since it does not contain information regarding the schema's // composite types - log.Warnf(fmt.Sprintf("Unable to find expected data type: %s. This database connection will not be pooled.", dataType)) + slog.Warn(fmt.Sprintf("Unable to find expected data type: %s. 
This database connection will not be pooled.", dataType)) return false } } diff --git a/packages/go/dawgs/traversal/traversal.go b/packages/go/dawgs/traversal/traversal.go index 538c0131f9..eac36fec75 100644 --- a/packages/go/dawgs/traversal/traversal.go +++ b/packages/go/dawgs/traversal/traversal.go @@ -125,12 +125,12 @@ func (s *pattern) Do(delegate PatternMatchDelegate) Driver { func (s *pattern) OutboundWithDepth(min, max int, criteria ...graph.Criteria) PatternContinuation { if min < 0 { min = 1 - log.Warnf(fmt.Sprintf("Negative mindepth not allowed. Setting min depth for expansion to 1")) + slog.Warn("Negative mindepth not allowed. Setting min depth for expansion to 1") } if max < 0 { max = 0 - log.Warnf(fmt.Sprintf("Negative maxdepth not allowed. Setting max depth for expansion to 0")) + slog.Warn("Negative maxdepth not allowed. Setting max depth for expansion to 0") } s.expansions = append(s.expansions, expansion{ @@ -153,12 +153,12 @@ func (s *pattern) Outbound(criteria ...graph.Criteria) PatternContinuation { func (s *pattern) InboundWithDepth(min, max int, criteria ...graph.Criteria) PatternContinuation { if min < 0 { min = 1 - log.Warnf(fmt.Sprintf("Negative mindepth not allowed. Setting min depth for expansion to 1")) + slog.Warn("Negative mindepth not allowed. Setting min depth for expansion to 1") } if max < 0 { max = 0 - log.Warnf(fmt.Sprintf("Negative maxdepth not allowed. Setting max depth for expansion to 0")) + slog.Warn("Negative maxdepth not allowed. 
Setting max depth for expansion to 0") } s.expansions = append(s.expansions, expansion{ diff --git a/packages/go/ein/azure.go b/packages/go/ein/azure.go index 29cbcfea6a..18250274bc 100644 --- a/packages/go/ein/azure.go +++ b/packages/go/ein/azure.go @@ -34,7 +34,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) const ( @@ -456,7 +455,7 @@ func ConvertAzureGroupMembersToRels(data models.GroupMembers) []IngestibleRelati if err := json.Unmarshal(raw.Member, &member); err != nil { slog.Error(fmt.Sprintf(SerialError, "azure group member", err)) } else if memberType, err := ExtractTypeFromDirectoryObject(member); errors.Is(err, ErrInvalidType) { - log.Warnf(fmt.Sprintf(ExtractError, err)) + slog.Warn(fmt.Sprintf(ExtractError, err)) } else if err != nil { slog.Error(fmt.Sprintf(ExtractError, err)) } else { @@ -490,7 +489,7 @@ func ConvertAzureGroupOwnerToRels(data models.GroupOwners) []IngestibleRelations if err := json.Unmarshal(raw.Owner, &owner); err != nil { slog.Error(fmt.Sprintf(SerialError, "azure group owner", err)) } else if ownerType, err := ExtractTypeFromDirectoryObject(owner); errors.Is(err, ErrInvalidType) { - log.Warnf(fmt.Sprintf(ExtractError, err)) + slog.Warn(fmt.Sprintf(ExtractError, err)) } else if err != nil { slog.Error(fmt.Sprintf(ExtractError, err)) } else { @@ -1076,7 +1075,7 @@ func ConvertAzureServicePrincipalOwnerToRels(data models.ServicePrincipalOwners) if err := json.Unmarshal(raw.Owner, &owner); err != nil { slog.Error(fmt.Sprintf(SerialError, "azure service principal owner", err)) } else if ownerType, err := ExtractTypeFromDirectoryObject(owner); errors.Is(err, ErrInvalidType) { - log.Warnf(fmt.Sprintf(ExtractError, err)) + slog.Warn(fmt.Sprintf(ExtractError, err)) } else if err != nil { slog.Error(fmt.Sprintf(ExtractError, err)) } else { diff --git 
a/packages/go/stbernard/workspace/golang/build.go b/packages/go/stbernard/workspace/golang/build.go index 6e44115f1b..6c5ff496e3 100644 --- a/packages/go/stbernard/workspace/golang/build.go +++ b/packages/go/stbernard/workspace/golang/build.go @@ -25,7 +25,6 @@ import ( "sync" "github.com/Masterminds/semver/v3" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" "github.com/specterops/bloodhound/packages/go/stbernard/environment" "github.com/specterops/bloodhound/packages/go/stbernard/git" @@ -43,7 +42,7 @@ func BuildMainPackages(workRoot string, modPaths []string, env environment.Envir ) if version, err = git.ParseLatestVersionFromTags(workRoot, env); err != nil { - log.Warnf(fmt.Sprintf("Failed to parse version from git tags, falling back to environment variable: %v", err)) + slog.Warn(fmt.Sprintf("Failed to parse version from git tags, falling back to environment variable: %v", err)) parsedVersion, err := semver.NewVersion(env[environment.VersionVarName]) if err != nil { return fmt.Errorf("error parsing version from environment variable: %w", err) diff --git a/packages/go/stbernard/workspace/yarn/yarn.go b/packages/go/stbernard/workspace/yarn/yarn.go index c6c5efd5f5..c87b00216f 100644 --- a/packages/go/stbernard/workspace/yarn/yarn.go +++ b/packages/go/stbernard/workspace/yarn/yarn.go @@ -20,10 +20,10 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os" "path/filepath" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" "github.com/specterops/bloodhound/packages/go/stbernard/environment" "github.com/specterops/bloodhound/slicesext" @@ -164,7 +164,7 @@ func relWorkspaceToAbsWorkspace(cwd string, relWorkspace Workspace) Workspace { func getCoverage(coverFile string) (coverage, error) { var cov coverage if b, err := os.ReadFile(coverFile); err != nil { - log.Warnf(fmt.Sprintf("Could not find coverage for %s, skipping", coverFile)) + 
slog.Warn(fmt.Sprintf("Could not find coverage for %s, skipping", coverFile)) return cov, nil } else if err := json.Unmarshal(b, &cov); err != nil { return cov, fmt.Errorf("unmarshal coverage file %s: %w", coverFile, err) From c95ca844c4c792d260340542bb59a18666c5fde9 Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 14:36:36 -0500 Subject: [PATCH 12/20] BED-4153: Migrate log.Debug and log.Panic --- cmd/api/src/api/bloodhoundgraph/properties.go | 4 +- cmd/api/src/api/middleware/middleware.go | 3 +- cmd/api/src/api/v2/apitest/test.go | 6 +- cmd/api/src/cmd/bhapi/main.go | 10 +-- cmd/api/src/daemons/datapipe/cleanup.go | 6 +- cmd/api/src/database/log.go | 4 +- cmd/api/src/queries/graph.go | 4 +- cmd/api/src/test/lab/fixtures/api.go | 6 +- packages/go/analysis/ad/esc10.go | 5 +- packages/go/analysis/ad/esc13.go | 3 +- packages/go/analysis/ad/esc9.go | 5 +- packages/go/analysis/azure/post.go | 3 +- packages/go/analysis/hybrid/hybrid.go | 4 +- packages/go/analysis/impact/aggregator.go | 3 +- packages/go/analysis/impact/id_aggregator.go | 3 +- packages/go/analysis/post.go | 8 +- packages/go/analysis/post_operation.go | 8 +- packages/go/cache/cache_benchmark_test.go | 4 +- packages/go/conftool/main.go | 13 +++- packages/go/dawgs/traversal/traversal.go | 11 ++- packages/go/ein/ad.go | 5 +- packages/go/log/log.go | 77 +------------------ packages/go/schemagen/generator/cue.go | 5 +- packages/go/schemagen/main.go | 18 +++-- packages/go/stbernard/analyzers/analyzers.go | 8 +- .../go/stbernard/command/builder/builder.go | 4 +- .../go/stbernard/command/tester/tester.go | 3 +- packages/go/stbernard/main.go | 10 ++- 28 files changed, 87 insertions(+), 156 deletions(-) diff --git a/cmd/api/src/api/bloodhoundgraph/properties.go b/cmd/api/src/api/bloodhoundgraph/properties.go index 568ee5d2a8..7ff9304afa 100644 --- a/cmd/api/src/api/bloodhoundgraph/properties.go +++ b/cmd/api/src/api/bloodhoundgraph/properties.go @@ -18,13 +18,13 @@ 
package bloodhoundgraph import ( "fmt" + "log/slog" "strings" "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) // We ignore the property lookup errors here since there's no clear path for a caller to handle it. Logging is also @@ -33,7 +33,7 @@ import ( func getNodeLevel(target *graph.Node) (int, bool) { if startSystemTags, err := target.Properties.Get(common.SystemTags.String()).String(); err == nil { - log.Debugf(fmt.Sprintf("Unable to find a %s property for node %d with kinds %v", common.SystemTags.String(), target.ID, target.Kinds)) + slog.Debug(fmt.Sprintf("Unable to find a %s property for node %d with kinds %v", common.SystemTags.String(), target.ID, target.Kinds)) } else if strings.Contains(startSystemTags, ad.AdminTierZero) { return 0, true } diff --git a/cmd/api/src/api/middleware/middleware.go b/cmd/api/src/api/middleware/middleware.go index 67fa73cc53..344e5e3abd 100644 --- a/cmd/api/src/api/middleware/middleware.go +++ b/cmd/api/src/api/middleware/middleware.go @@ -31,7 +31,6 @@ import ( "github.com/gorilla/handlers" "github.com/gorilla/mux" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/ctx" @@ -164,7 +163,7 @@ func parseUserIP(r *http.Request) string { } if result := r.Header.Get("X-Forwarded-For"); result == "" { - log.Debugf(fmt.Sprintf("No data found in X-Forwarded-For header")) + slog.DebugContext(r.Context(), fmt.Sprintf("No data found in X-Forwarded-For header")) return remoteIp } else { result += "," + remoteIp diff --git a/cmd/api/src/api/v2/apitest/test.go b/cmd/api/src/api/v2/apitest/test.go index 77f815b687..b5b9196c42 100644 --- a/cmd/api/src/api/v2/apitest/test.go +++ 
b/cmd/api/src/api/v2/apitest/test.go @@ -18,7 +18,8 @@ package apitest import ( "fmt" - "log" + "log/slog" + "os" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/api/v2/auth" @@ -31,7 +32,8 @@ import ( func NewAuthManagementResource(mockCtrl *gomock.Controller) (auth.ManagementResource, *mocks.MockDatabase) { cfg, err := config.NewDefaultConfiguration() if err != nil { - log.Fatalf(fmt.Sprintf("Failed to create default configuration: %v", err)) + slog.Error(fmt.Sprintf("Failed to create default configuration: %v", err)) + os.Exit(1) } cfg.Crypto.Argon2.NumIterations = 1 diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index c74b3fe9a5..eaa3e6e7f0 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -24,7 +24,6 @@ import ( "os" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/handlers" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" @@ -62,11 +61,9 @@ func main() { logger := handlers.NewDefaultLogger() slog.SetDefault(logger) - // Initialize basic logging facilities while we start up - log.ConfigureDefaults() - if cfg, err := config.GetConfiguration(configFilePath, config.NewDefaultConfiguration); err != nil { - log.Fatalf(fmt.Sprintf("Unable to read configuration %s: %v", configFilePath, err)) + slog.Error(fmt.Sprintf("Unable to read configuration %s: %v", configFilePath, err)) + os.Exit(1) } else { initializer := bootstrap.Initializer[*database.BloodhoundDB, *graph.DatabaseSwitch]{ Configuration: cfg, @@ -76,7 +73,8 @@ func main() { } if err := initializer.Launch(context.Background(), true); err != nil { - log.Fatalf(fmt.Sprintf("Failed starting the server: %v", err)) + slog.Error(fmt.Sprintf("Failed starting the server: %v", err)) + os.Exit(1) } } } diff --git a/cmd/api/src/daemons/datapipe/cleanup.go b/cmd/api/src/daemons/datapipe/cleanup.go index 
228bfe2397..bebe19c345 100644 --- a/cmd/api/src/daemons/datapipe/cleanup.go +++ b/cmd/api/src/daemons/datapipe/cleanup.go @@ -26,8 +26,6 @@ import ( "path/filepath" "strings" "sync" - - "github.com/specterops/bloodhound/log" ) // FileOperations is an interface for describing filesystem actions. This implementation exists due to deficiencies in @@ -82,7 +80,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin defer s.lock.Unlock() slog.InfoContext(ctx, fmt.Sprintf("Running OrphanFileSweeper for path %s", s.tempDirectoryRootPath)) - log.Debugf(fmt.Sprintf("OrphanFileSweeper expected names %v", expectedFileNames)) + slog.DebugContext(ctx, fmt.Sprintf("OrphanFileSweeper expected names %v", expectedFileNames)) if dirEntries, err := s.fileOps.ReadDir(s.tempDirectoryRootPath); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed reading work directory %s: %v", s.tempDirectoryRootPath, err)) @@ -101,7 +99,7 @@ func (s *OrphanFileSweeper) Clear(ctx context.Context, expectedFileNames []strin } for idx, dirEntry := range dirEntries { if expectedFN == dirEntry.Name() { - log.Debugf(fmt.Sprintf("skipping expected file %s", expectedFN)) + slog.DebugContext(ctx, fmt.Sprintf("skipping expected file %s", expectedFN)) dirEntries = append(dirEntries[:idx], dirEntries[idx+1:]...) 
} } diff --git a/cmd/api/src/database/log.go b/cmd/api/src/database/log.go index 66b16420d5..24ab256a40 100644 --- a/cmd/api/src/database/log.go +++ b/cmd/api/src/database/log.go @@ -60,9 +60,9 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( sql, _ := fc() if slog.Default().Enabled(ctx, slog.LevelDebug) { - slog.ErrorContext(ctx, "Database error", "query", sql, "err", err, handlers.GetSlogCallStack()) + slog.ErrorContext(ctx, "Database error", "query", sql, "error", err, handlers.GetSlogCallStack()) } else { - slog.ErrorContext(ctx, "Database error", "query", sql, "err", err) + slog.ErrorContext(ctx, "Database error", "query", sql, "error", err) } } else { elapsed := time.Since(begin) diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index caed3071fd..c6dac7fd9f 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -444,13 +444,13 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i ) if bhCtxInst.Timeout > maxTimeout { - log.Debugf(fmt.Sprintf("Custom timeout is too large, using the maximum allowable timeout of %d minutes instead", maxTimeout.Minutes())) + slog.DebugContext(ctx, fmt.Sprintf("Custom timeout is too large, using the maximum allowable timeout of %d minutes instead", maxTimeout.Minutes())) bhCtxInst.Timeout = maxTimeout } availableRuntime := bhCtxInst.Timeout if availableRuntime > 0 { - log.Debugf(fmt.Sprintf("Available timeout for query is set to: %d seconds", availableRuntime.Seconds())) + slog.DebugContext(ctx, fmt.Sprintf("Available timeout for query is set to: %d seconds", availableRuntime.Seconds())) } else { availableRuntime = defaultTimeout if !s.DisableCypherComplexityLimit { diff --git a/cmd/api/src/test/lab/fixtures/api.go b/cmd/api/src/test/lab/fixtures/api.go index b730553c1b..b2c3889031 100644 --- a/cmd/api/src/test/lab/fixtures/api.go +++ b/cmd/api/src/test/lab/fixtures/api.go @@ -19,9 +19,10 @@ package fixtures import ( 
"context" "fmt" - "log" + "log/slog" "net/http" "net/url" + "os" "sync" "time" @@ -88,7 +89,8 @@ func NewCustomApiFixture(cfgFixture *lab.Fixture[config.Configuration]) *lab.Fix }) if err := lab.SetDependency(fixture, cfgFixture); err != nil { - log.Fatalf(fmt.Sprintf("BHApiFixture dependency error: %v", err)) + slog.Error(fmt.Sprintf("BHApiFixture dependency error: %v", err)) + os.Exit(1) } return fixture diff --git a/packages/go/analysis/ad/esc10.go b/packages/go/analysis/ad/esc10.go index 9cadb50dc5..60c0937280 100644 --- a/packages/go/analysis/ad/esc10.go +++ b/packages/go/analysis/ad/esc10.go @@ -32,7 +32,6 @@ import ( "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/ein" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca, domain *graph.Node, cache ADCSCache) error { @@ -52,7 +51,7 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) + slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) @@ -98,7 +97,7 @@ func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analy } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for 
cert template %d from cache", template.ID)) + slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(enterpriseCA.ID)) diff --git a/packages/go/analysis/ad/esc13.go b/packages/go/analysis/ad/esc13.go index 860d918d68..2f0ecd1bcc 100644 --- a/packages/go/analysis/ad/esc13.go +++ b/packages/go/analysis/ad/esc13.go @@ -32,7 +32,6 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca, domain *graph.Node, cache ADCSCache) error { @@ -117,7 +116,7 @@ func groupIsContainedOrTrusted(tx graph.Transaction, group, domain *graph.Node) ) if err := ops.Traversal(tx, traversalPlan, pathVisitor); err != nil { - log.Debugf(fmt.Sprintf("groupIsContainedOrTrusted traversal error: %v", err)) + slog.Debug(fmt.Sprintf("groupIsContainedOrTrusted traversal error: %v", err)) } return matchFound diff --git a/packages/go/analysis/ad/esc9.go b/packages/go/analysis/ad/esc9.go index 6f91387443..a9648c389a 100644 --- a/packages/go/analysis/ad/esc9.go +++ b/packages/go/analysis/ad/esc9.go @@ -31,7 +31,6 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" ) func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca, domain *graph.Node, cache ADCSCache) error { @@ -51,7 +50,7 @@ func 
PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) + slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) @@ -97,7 +96,7 @@ func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analys } else if !valid { continue } else if certTemplateEnrollers := cache.GetCertTemplateEnrollers(template.ID); len(certTemplateEnrollers) == 0 { - log.Debugf(fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) + slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) diff --git a/packages/go/analysis/azure/post.go b/packages/go/analysis/azure/post.go index 4dfc770470..d0eb9c4b0c 100644 --- a/packages/go/analysis/azure/post.go +++ b/packages/go/analysis/azure/post.go @@ -30,7 +30,6 @@ import ( "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) func AddMemberAllGroupsTargetRoles() []string { @@ -661,7 +660,7 @@ func addSecret(operation analysis.StatTrackedOperation[analysis.CreatePostRelati } else { for _, role := range addSecretRoles { for _, target := range tenantAppsAndSPs { - 
log.Debugf(fmt.Sprintf("Adding AZAddSecret edge from role %s to %s %d", role.ID.String(), target.Kinds.Strings(), target.ID)) + slog.DebugContext(ctx, fmt.Sprintf("Adding AZAddSecret edge from role %s to %s %d", role.ID.String(), target.Kinds.Strings(), target.ID)) nextJob := analysis.CreatePostRelationshipJob{ FromID: role.ID, ToID: target.ID, diff --git a/packages/go/analysis/hybrid/hybrid.go b/packages/go/analysis/hybrid/hybrid.go index 0d31ca1ee9..49552ad3dd 100644 --- a/packages/go/analysis/hybrid/hybrid.go +++ b/packages/go/analysis/hybrid/hybrid.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/analysis/azure" @@ -30,7 +31,6 @@ import ( adSchema "github.com/specterops/bloodhound/graphschema/ad" azureSchema "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" ) func PostHybrid(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { @@ -179,7 +179,7 @@ func createMissingADUser(ctx context.Context, db graph.Database, objectID string newNode *graph.Node ) - log.Debugf(fmt.Sprintf("Matching AD User node with objectID %s not found, creating a new one", objectID)) + slog.DebugContext(ctx, fmt.Sprintf("Matching AD User node with objectID %s not found, creating a new one", objectID)) properties := graph.AsProperties(map[string]any{ common.ObjectID.String(): objectID, }) diff --git a/packages/go/analysis/impact/aggregator.go b/packages/go/analysis/impact/aggregator.go index 135ca3a6e3..2bd78fa8fa 100644 --- a/packages/go/analysis/impact/aggregator.go +++ b/packages/go/analysis/impact/aggregator.go @@ -24,7 +24,6 @@ import ( "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" ) // Aggregator is a cardinality aggregator for paths and shortcut paths. 
@@ -163,7 +162,7 @@ func (s Aggregator) resolve(targetID uint64) cardinality.Provider[uint64] { } func (s Aggregator) Cardinality(targets ...uint64) cardinality.Provider[uint64] { - log.Debugf(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) + slog.Debug(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) defer measure.Measure(slog.LevelDebug, "Calculated pathMembers cardinality", "num_targets", len(targets))() impact := s.newCardinalityProvider() diff --git a/packages/go/analysis/impact/id_aggregator.go b/packages/go/analysis/impact/id_aggregator.go index 76c97a0adc..99676c6fa4 100644 --- a/packages/go/analysis/impact/id_aggregator.go +++ b/packages/go/analysis/impact/id_aggregator.go @@ -23,7 +23,6 @@ import ( "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" ) @@ -214,7 +213,7 @@ func (s IDA) resolve(targetID uint64) cardinality.Provider[uint64] { } func (s IDA) Cardinality(targets ...uint64) cardinality.Provider[uint64] { - log.Debugf(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) + slog.Debug(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) defer measure.Measure(slog.LevelDebug, "Calculated pathMembers cardinality for %d targets", len(targets))() impact := s.newCardinalityProvider() diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index 7df0894452..133f769100 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -95,19 +95,19 @@ func (s PostProcessingStats) LogStats() { return } - log.Debugf(fmt.Sprintf("Relationships deleted before post-processing:")) + slog.Debug(fmt.Sprintf("Relationships deleted before post-processing:")) for _, relationship := range statsSortedKeys(s.RelationshipsDeleted) { if numDeleted := s.RelationshipsDeleted[relationship]; 
numDeleted > 0 { - log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) + slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) } } - log.Debugf(fmt.Sprintf("Relationships created after post-processing:")) + slog.Debug(fmt.Sprintf("Relationships created after post-processing:")) for _, relationship := range statsSortedKeys(s.RelationshipsCreated) { if numDeleted := s.RelationshipsCreated[relationship]; numDeleted > 0 { - log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), s.RelationshipsCreated[relationship])) + slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), s.RelationshipsCreated[relationship])) } } } diff --git a/packages/go/analysis/post_operation.go b/packages/go/analysis/post_operation.go index d289a86854..3f393100fe 100644 --- a/packages/go/analysis/post_operation.go +++ b/packages/go/analysis/post_operation.go @@ -136,19 +136,19 @@ func (s *AtomicPostProcessingStats) LogStats() { return } - log.Debugf(fmt.Sprintf("Relationships deleted before post-processing:")) + slog.Debug(fmt.Sprintf("Relationships deleted before post-processing:")) for _, relationship := range atomicStatsSortedKeys(s.RelationshipsDeleted) { if numDeleted := int(*s.RelationshipsDeleted[relationship]); numDeleted > 0 { - log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) + slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) } } - log.Debugf(fmt.Sprintf("Relationships created after post-processing:")) + slog.Debug(fmt.Sprintf("Relationships created after post-processing:")) for _, relationship := range atomicStatsSortedKeys(s.RelationshipsCreated) { if numCreated := int(*s.RelationshipsCreated[relationship]); numCreated > 0 { - log.Debugf(fmt.Sprintf(" %s %d", relationship.String(), numCreated)) + slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numCreated)) } } } diff --git a/packages/go/cache/cache_benchmark_test.go b/packages/go/cache/cache_benchmark_test.go index 31e07b4cb7..5183291c80 100644 --- 
a/packages/go/cache/cache_benchmark_test.go +++ b/packages/go/cache/cache_benchmark_test.go @@ -18,7 +18,7 @@ package cache_test import ( "fmt" - "log" + "log/slog" "math" "testing" @@ -44,7 +44,7 @@ func getObjectIDs(num int) []string { func setupLRUCache() cache.Cache { if c, err := cache.NewCache(cache.Config{MaxSize: numSimulatedOUs}); err != nil { - log.Fatalf(fmt.Sprintf("Error creating cache: %v", err)) + slog.Error(fmt.Sprintf("Error creating cache: %v", err)) } else { return c } diff --git a/packages/go/conftool/main.go b/packages/go/conftool/main.go index f602c96721..8eea312a3a 100644 --- a/packages/go/conftool/main.go +++ b/packages/go/conftool/main.go @@ -21,6 +21,7 @@ import ( "flag" "fmt" "log" + "log/slog" "os" "time" @@ -39,7 +40,8 @@ func main() { flag.Parse() if configfile, err := os.Create(path); err != nil { - log.Fatalf(fmt.Sprintf("Could not create config file %s: %v", path, err)) + slog.Error(fmt.Sprintf("Could not create config file %s: %v", path, err)) + os.Exit(1) } else { defer configfile.Close() @@ -48,11 +50,14 @@ func main() { } if argon2Config, err := config.GenerateArgonSettings(time.Duration(tuneMillis), skipArgon2); err != nil { - log.Fatalf(fmt.Sprintf("Could not generate argon2 settings: %v", err)) + slog.Error(fmt.Sprintf("Could not generate argon2 settings: %v", err)) + os.Exit(1) } else if bytes, err := json.Marshal(argon2Config); err != nil { - log.Fatalf(fmt.Sprintf("Coule not marshal argon2 settings: %v", err)) + slog.Error(fmt.Sprintf("Coule not marshal argon2 settings: %v", err)) + os.Exit(1) } else if _, err := configfile.Write(bytes); err != nil { - log.Fatalf(fmt.Sprintf("Could not write to config file %s: %v", path, err)) + slog.Error(fmt.Sprintf("Could not write to config file %s: %v", path, err)) + os.Exit(1) } else { log.Printf(fmt.Sprintf("Successfully wrote to config file to %s", path)) } diff --git a/packages/go/dawgs/traversal/traversal.go b/packages/go/dawgs/traversal/traversal.go index eac36fec75..ee15b5edc5 
100644 --- a/packages/go/dawgs/traversal/traversal.go +++ b/packages/go/dawgs/traversal/traversal.go @@ -32,7 +32,6 @@ import ( "github.com/specterops/bloodhound/dawgs/util" "github.com/specterops/bloodhound/dawgs/util/atomics" "github.com/specterops/bloodhound/dawgs/util/channels" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/log/measure" ) @@ -529,21 +528,21 @@ func FilteredSkipLimit(filter SkipLimitFilter, visitorFilter SegmentVisitor, ski if skip == 0 || shouldCollect() { // If we should collect this result, check to see if we're already at a limit for the number of results if limit > 0 && atLimit() { - log.Debugf(fmt.Sprintf("At collection limit, rejecting path: %s", graph.FormatPathSegment(next))) + slog.Debug(fmt.Sprintf("At collection limit, rejecting path: %s", graph.FormatPathSegment(next))) return false } - log.Debugf(fmt.Sprintf("Collected path: %s", graph.FormatPathSegment(next))) + slog.Debug(fmt.Sprintf("Collected path: %s", graph.FormatPathSegment(next))) visitorFilter(next) } else { - log.Debugf(fmt.Sprintf("Skipping path visit: %s", graph.FormatPathSegment(next))) + slog.Debug(fmt.Sprintf("Skipping path visit: %s", graph.FormatPathSegment(next))) } } if shouldDescend { - log.Debugf(fmt.Sprintf("Descending into path: %s", graph.FormatPathSegment(next))) + slog.Debug(fmt.Sprintf("Descending into path: %s", graph.FormatPathSegment(next))) } else { - log.Debugf(fmt.Sprintf("Rejecting further descent into path: %s", graph.FormatPathSegment(next))) + slog.Debug(fmt.Sprintf("Rejecting further descent into path: %s", graph.FormatPathSegment(next))) } return shouldDescend diff --git a/packages/go/ein/ad.go b/packages/go/ein/ad.go index 8fb497f40d..733c9ab06e 100644 --- a/packages/go/ein/ad.go +++ b/packages/go/ein/ad.go @@ -25,7 +25,6 @@ import ( "github.com/specterops/bloodhound/analysis" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log" 
"github.com/specterops/bloodhound/slicesext" ) @@ -79,7 +78,7 @@ func stringToBool(itemProps map[string]any, keyName string) { case bool: //pass default: - log.Debugf(fmt.Sprintf("Removing %s with type %T", converted)) + slog.Debug(fmt.Sprintf("Removing %s with type %T", converted)) delete(itemProps, keyName) } } @@ -97,7 +96,7 @@ func stringToInt(itemProps map[string]any, keyName string) { case int: //pass default: - log.Debugf(fmt.Sprintf("Removing %s with type %T", keyName, converted)) + slog.Debug(fmt.Sprintf("Removing %s with type %T", keyName, converted)) delete(itemProps, keyName) } } diff --git a/packages/go/log/log.go b/packages/go/log/log.go index bfbd89488f..b0f1450008 100644 --- a/packages/go/log/log.go +++ b/packages/go/log/log.go @@ -18,10 +18,10 @@ package log import ( "fmt" + "strings" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "os" - "strings" ) // Level is a type alias that represents a log verbosity level. @@ -137,76 +137,3 @@ func WithLevel(level Level) Event { event: log.WithLevel(level), } } - -// Panic returns a logging event with the LevelPanic log verbosity level. -func Panic() Event { - return WithLevel(LevelPanic) -} - -// Panicf is a convenience function for writing a log event with the given format and arguments with the LevelPanic -// log verbosity level. -func Panicf(format string, args ...any) { - Panic().Msgf(format, args...) -} - -// Fatalf is a convenience function for writing a log event with the given format and arguments with the LevelFatal -// log verbosity level. -func Fatalf(format string, args ...any) { - WithLevel(LevelFatal).Msgf(format, args...) - os.Exit(1) -} - -// Error returns a logging event with the LevelError log verbosity level. -func Error() Event { - return WithLevel(LevelError) -} - -// Errorf is a convenience function for writing a log event with the given format and arguments with the LevelError -// log verbosity level. -func Errorf(format string, args ...any) { - Error().Msgf(format, args...) 
-} - -// Warn returns a logging event with the LevelWarn log verbosity level. -func Warn() Event { - return WithLevel(LevelWarn) -} - -// Warnf is a convenience function for writing a log event with the given format and arguments with the LevelWarn -// log verbosity level. -func Warnf(format string, args ...any) { - Warn().Msgf(format, args...) -} - -// Info returns a logging event with the LevelInfo log verbosity level. -func Info() Event { - return WithLevel(LevelInfo) -} - -// Infof is a convenience function for writing a log event with the given format and arguments with the LevelInfo -// log verbosity level. -func Infof(format string, args ...any) { - Info().Msgf(format, args...) -} - -// Debug returns a logging event with the LevelDebug log verbosity level. -func Debug() Event { - return WithLevel(LevelDebug) -} - -// Debugf is a convenience function for writing a log event with the given format and arguments with the LevelDebug -// log verbosity level. -func Debugf(format string, args ...any) { - Debug().Msgf(format, args...) -} - -// Trace returns a logging event with the LevelTrace log verbosity level. -func Trace() Event { - return WithLevel(LevelTrace) -} - -// Tracef is a convenience function for writing a log event with the given format and arguments with the LevelTrace -// log verbosity level. -func Tracef(format string, args ...any) { - Trace().Msgf(format, args...) 
-} diff --git a/packages/go/schemagen/generator/cue.go b/packages/go/schemagen/generator/cue.go index db4a51b627..4c970f353a 100644 --- a/packages/go/schemagen/generator/cue.go +++ b/packages/go/schemagen/generator/cue.go @@ -19,12 +19,11 @@ package generator import ( "fmt" "io/fs" + "log/slog" "os" "path/filepath" "strings" - "github.com/specterops/bloodhound/log" - "cuelang.org/go/cue" "cuelang.org/go/cue/cuecontext" "cuelang.org/go/cue/load" @@ -96,7 +95,7 @@ func (s *ConfigBuilder) OverlayPath(rootPath string) error { } else { overlayPath := filepath.Join(s.overlayRootPath, strings.TrimPrefix(path, rootPath)) - log.Debugf(fmt.Sprintf("Overlaying file: %s to %s", path, overlayPath)) + slog.Debug(fmt.Sprintf("Overlaying file: %s to %s", path, overlayPath)) s.overlay[overlayPath] = load.FromBytes(content) } diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go index 384219dea7..7a6d875597 100644 --- a/packages/go/schemagen/main.go +++ b/packages/go/schemagen/main.go @@ -75,31 +75,37 @@ func main() { cfgBuilder := generator.NewConfigBuilder("/schemas") if projectRoot, err := generator.FindGolangWorkspaceRoot(); err != nil { - log.Fatalf(fmt.Sprintf("Error finding project root: %v", err)) + slog.Error(fmt.Sprintf("Error finding project root: %v", err)) + os.Exit(1) } else { slog.Info(fmt.Sprintf("Project root is %s", projectRoot)) if err := cfgBuilder.OverlayPath(filepath.Join(projectRoot, "packages/cue")); err != nil { - log.Fatalf(fmt.Sprintf("Error: %v", err)) + slog.Error(fmt.Sprintf("Error: %v", err)) + os.Exit(1) } cfg := cfgBuilder.Build() if bhInstance, err := cfg.Value("/schemas/bh/bh.cue"); err != nil { - log.Fatalf(fmt.Sprintf("Error: %v", errors.Details(err, nil))) + slog.Error(fmt.Sprintf("Error: %v", errors.Details(err, nil))) + os.Exit(1) } else { var bhModels Schema if err := bhInstance.Decode(&bhModels); err != nil { - log.Fatalf(fmt.Sprintf("Error: %v", errors.Details(err, nil))) + slog.Error(fmt.Sprintf("Error: %v", 
errors.Details(err, nil))) + os.Exit(1) } if err := GenerateGolang(projectRoot, bhModels); err != nil { - log.Fatalf(fmt.Sprintf("Error %v", err)) + slog.Error(fmt.Sprintf("Error %v", err)) + os.Exit(1) } if err := GenerateSharedTypeScript(projectRoot, bhModels); err != nil { - log.Fatalf(fmt.Sprintf("Error %v", err)) + slog.Error(fmt.Sprintf("Error %v", err)) + os.Exit(1) } } } diff --git a/packages/go/stbernard/analyzers/analyzers.go b/packages/go/stbernard/analyzers/analyzers.go index d81dab7642..2541a216e7 100644 --- a/packages/go/stbernard/analyzers/analyzers.go +++ b/packages/go/stbernard/analyzers/analyzers.go @@ -20,9 +20,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "path/filepath" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/analyzers/golang" "github.com/specterops/bloodhound/packages/go/stbernard/analyzers/js" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" @@ -44,14 +44,14 @@ func Run(cwd string, modPaths []string, jsPaths []string, env environment.Enviro golint, err := golang.Run(cwd, modPaths, env) if errors.Is(err, cmdrunner.ErrNonZeroExit) { - log.Debug().Msg("Ignoring golangci-lint exit code") + slog.Debug("Ignoring golangci-lint exit code") } else if err != nil { return "", fmt.Errorf("golangci-lint: %w", err) } eslint, err := js.Run(jsPaths, env) if errors.Is(err, cmdrunner.ErrNonZeroExit) { - log.Debug().Msg("Ignoring eslint exit code") + slog.Debug("Ignoring eslint exit code") } else if err != nil { return "", fmt.Errorf("eslint: %w", err) } @@ -61,7 +61,7 @@ func Run(cwd string, modPaths []string, jsPaths []string, env environment.Enviro for idx, entry := range codeClimateReport { // We're using err == nil here because we want to do nothing if an error occurs if path, err := filepath.Rel(cwd, entry.Location.Path); err != nil { - log.Debug().Fault(err).Msg("File path is either already relative or cannot be relative to workspace root") + slog.Debug("File path is 
either already relative or cannot be relative to workspace root", "error", err) } else { codeClimateReport[idx].Location.Path = path } diff --git a/packages/go/stbernard/command/builder/builder.go b/packages/go/stbernard/command/builder/builder.go index 6be014451b..d9e23659f6 100644 --- a/packages/go/stbernard/command/builder/builder.go +++ b/packages/go/stbernard/command/builder/builder.go @@ -19,11 +19,11 @@ package builder import ( "flag" "fmt" + "log/slog" "os" "path/filepath" "slices" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/environment" "github.com/specterops/bloodhound/packages/go/stbernard/workspace" "github.com/specterops/bloodhound/packages/go/stbernard/workspace/golang" @@ -128,7 +128,7 @@ func clearFiles(path string, entry os.DirEntry, err error) error { return nil } - log.Debugf(fmt.Sprintf("Removing %s", filepath.Join(path, entry.Name()))) + slog.Debug(fmt.Sprintf("Removing %s", filepath.Join(path, entry.Name()))) if entry.IsDir() { if err := os.RemoveAll(filepath.Join(path, entry.Name())); err != nil { diff --git a/packages/go/stbernard/command/tester/tester.go b/packages/go/stbernard/command/tester/tester.go index 05c758fd81..9aeeda1702 100644 --- a/packages/go/stbernard/command/tester/tester.go +++ b/packages/go/stbernard/command/tester/tester.go @@ -24,7 +24,6 @@ import ( "os" "path/filepath" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/environment" "github.com/specterops/bloodhound/packages/go/stbernard/workspace" "github.com/specterops/bloodhound/packages/go/stbernard/workspace/golang" @@ -115,7 +114,7 @@ func (s *command) runTests(cwd string, coverPath string, modPaths []string) erro } else { for _, entry := range dirList { if filepath.Ext(entry.Name()) == golang.CoverageExt { - log.Debugf(fmt.Sprintf("Removing %s", filepath.Join(coverPath, entry.Name()))) + slog.Debug(fmt.Sprintf("Removing %s", filepath.Join(coverPath, entry.Name()))) if 
err := os.Remove(filepath.Join(coverPath, entry.Name())); err != nil { return fmt.Errorf("removing %s: %w", filepath.Join(coverPath, entry.Name()), err) } diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 018ed4ce18..160485bd5c 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "log/slog" + "os" "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/packages/go/stbernard/command" @@ -45,14 +46,17 @@ func main() { } if cmd, err := command.ParseCLI(env); errors.Is(err, command.ErrNoCmd) { - log.Fatalf(fmt.Sprintf("No valid command specified")) + slog.Error(fmt.Sprintf("No valid command specified")) + os.Exit(1) } else if errors.Is(err, command.ErrHelpRequested) { // No need to exit 1 if help was requested return } else if err != nil { - log.Fatalf(fmt.Sprintf("Error while parsing command: %v", err)) + slog.Error(fmt.Sprintf("Error while parsing command: %v", err)) + os.Exit(1) } else if err := cmd.Run(); err != nil { - log.Fatalf(fmt.Sprintf("Failed to run command `%s`: %v", cmd.Name(), err)) + slog.Error(fmt.Sprintf("Failed to run command `%s`: %v", cmd.Name(), err)) + os.Exit(1) } else { slog.Info(fmt.Sprintf("Command `%s` completed successfully", cmd.Name())) } From 878e29eb94f3bb93e1f9a6e8e7c37751a55c9daa Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 16:52:44 -0500 Subject: [PATCH 13/20] BED-4153: Rename log package to bhlog --- cmd/api/src/api/tools/logging.go | 8 +- cmd/api/src/api/tools/pg.go | 2 +- cmd/api/src/api/v2/analysisrequest.go | 2 +- cmd/api/src/api/v2/auth/auth_test.go | 4 +- cmd/api/src/api/v2/dataquality.go | 2 +- cmd/api/src/api/v2/file_uploads.go | 2 +- cmd/api/src/bootstrap/util.go | 8 +- cmd/api/src/cmd/bhapi/main.go | 2 +- cmd/api/src/cmd/dawgs-harness/main.go | 4 +- cmd/api/src/daemons/api/bhapi/api.go | 4 +- cmd/api/src/daemons/api/toolapi/api.go | 4 +- 
cmd/api/src/daemons/datapipe/agi.go | 3 +- cmd/api/src/daemons/datapipe/datapipe.go | 2 +- cmd/api/src/database/log.go | 8 +- cmd/api/src/database/log_test.go | 17 +-- cmd/api/src/migrations/manifest.go | 2 +- cmd/api/src/queries/graph.go | 12 +- cmd/api/src/services/agi/agi.go | 3 +- .../src/services/dataquality/dataquality.go | 2 +- go.work | 2 +- packages/go/analysis/ad/ad.go | 4 +- packages/go/analysis/ad/membership.go | 2 +- packages/go/analysis/ad/queries.go | 2 +- packages/go/analysis/analysis.go | 2 +- packages/go/analysis/azure/queries.go | 2 +- packages/go/analysis/azure/role.go | 2 +- packages/go/analysis/azure/tenant.go | 2 +- packages/go/analysis/impact/aggregator.go | 3 +- packages/go/analysis/impact/id_aggregator.go | 2 +- packages/go/analysis/post.go | 6 +- packages/go/analysis/post_operation.go | 6 +- .../go/{log => bhlog}/cmd/logtest/main.go | 0 packages/go/{log => bhlog}/config.go | 2 +- packages/go/{log => bhlog}/event.go | 2 +- packages/go/{log => bhlog}/go.mod | 2 +- packages/go/{log => bhlog}/go.sum | 0 packages/go/{log => bhlog}/golog.go | 2 +- .../go/{log => bhlog}/handlers/handlers.go | 0 packages/go/{log => bhlog}/log.go | 2 +- packages/go/{log => bhlog}/measure/measure.go | 0 packages/go/{log => bhlog}/mocks/event.go | 114 +++++++++--------- packages/go/dawgs/go.mod | 4 +- packages/go/dawgs/ops/traversal.go | 2 +- packages/go/dawgs/traversal/traversal.go | 2 +- packages/go/schemagen/main.go | 4 +- packages/go/stbernard/cmdrunner/cmdrunner.go | 4 +- packages/go/stbernard/command/command.go | 6 +- packages/go/stbernard/git/git.go | 6 +- packages/go/stbernard/go.mod | 8 +- packages/go/stbernard/go.sum | 23 +--- packages/go/stbernard/main.go | 8 +- 51 files changed, 148 insertions(+), 169 deletions(-) rename packages/go/{log => bhlog}/cmd/logtest/main.go (100%) rename packages/go/{log => bhlog}/config.go (99%) rename packages/go/{log => bhlog}/event.go (99%) rename packages/go/{log => bhlog}/go.mod (94%) rename packages/go/{log => 
bhlog}/go.sum (100%) rename packages/go/{log => bhlog}/golog.go (98%) rename packages/go/{log => bhlog}/handlers/handlers.go (100%) rename packages/go/{log => bhlog}/log.go (99%) rename packages/go/{log => bhlog}/measure/measure.go (100%) rename packages/go/{log => bhlog}/mocks/event.go (86%) diff --git a/cmd/api/src/api/tools/logging.go b/cmd/api/src/api/tools/logging.go index f36558d94d..376c8acd7c 100644 --- a/cmd/api/src/api/tools/logging.go +++ b/cmd/api/src/api/tools/logging.go @@ -19,7 +19,7 @@ package tools import ( "net/http" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/src/api" ) @@ -33,7 +33,7 @@ type LoggingLevel struct { func GetLoggingDetails(response http.ResponseWriter, request *http.Request) { api.WriteJSONResponse(request.Context(), LoggingLevel{ - Level: log.GlobalLevel().String(), + Level: bhlog.GlobalLevel().String(), }, http.StatusOK, response) } @@ -44,12 +44,12 @@ func PutLoggingDetails(response http.ResponseWriter, request *http.Request) { api.WriteJSONResponse(request.Context(), LoggingError{ Error: err.Error(), }, http.StatusBadRequest, response) - } else if level, err := log.ParseLevel(level.Level); err != nil { + } else if level, err := bhlog.ParseLevel(level.Level); err != nil { api.WriteJSONResponse(request.Context(), LoggingError{ Error: err.Error(), }, http.StatusBadRequest, response) } else { - log.SetGlobalLevel(level) + bhlog.SetGlobalLevel(level) response.WriteHeader(http.StatusOK) } } diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index 370239995d..e20eb9fa2c 100644 --- a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -24,12 +24,12 @@ import ( "sync" "github.com/neo4j/neo4j-go-driver/v5/neo4j/dbtype" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs" "github.com/specterops/bloodhound/dawgs/drivers/neo4j" "github.com/specterops/bloodhound/dawgs/drivers/pg" 
"github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/util/size" - "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/config" ) diff --git a/cmd/api/src/api/v2/analysisrequest.go b/cmd/api/src/api/v2/analysisrequest.go index 1e4b5e40da..4ae6b07f36 100644 --- a/cmd/api/src/api/v2/analysisrequest.go +++ b/cmd/api/src/api/v2/analysisrequest.go @@ -23,7 +23,7 @@ import ( "log/slog" "net/http" - "github.com/specterops/bloodhound/log/measure" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" diff --git a/cmd/api/src/api/v2/auth/auth_test.go b/cmd/api/src/api/v2/auth/auth_test.go index 81966be092..1e4f87cf41 100644 --- a/cmd/api/src/api/v2/auth/auth_test.go +++ b/cmd/api/src/api/v2/auth/auth_test.go @@ -32,8 +32,8 @@ import ( "github.com/gofrs/uuid" "github.com/gorilla/mux" "github.com/pquerna/otp/totp" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/mediatypes" "github.com/specterops/bloodhound/src/api" v2 "github.com/specterops/bloodhound/src/api/v2" @@ -1226,7 +1226,7 @@ func TestCreateUser_ResetPassword(t *testing.T) { goodUserMap, } - log.ConfigureDefaults() + bhlog.ConfigureDefaults() ctx := context.WithValue(context.Background(), ctx.ValueKey, &ctx.Context{}) payload, err := json.Marshal(input.Body) diff --git a/cmd/api/src/api/v2/dataquality.go b/cmd/api/src/api/v2/dataquality.go index 523925ad18..df561d608a 100644 --- a/cmd/api/src/api/v2/dataquality.go +++ b/cmd/api/src/api/v2/dataquality.go @@ -24,8 +24,8 @@ import ( "github.com/gorilla/mux" "github.com/specterops/bloodhound/analysis/ad" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" - 
"github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/model" "github.com/specterops/bloodhound/src/utils" diff --git a/cmd/api/src/api/v2/file_uploads.go b/cmd/api/src/api/v2/file_uploads.go index b69299ff70..87f03d9256 100644 --- a/cmd/api/src/api/v2/file_uploads.go +++ b/cmd/api/src/api/v2/file_uploads.go @@ -27,8 +27,8 @@ import ( "strings" "github.com/gorilla/mux" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/headers" - "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" diff --git a/cmd/api/src/bootstrap/util.go b/cmd/api/src/bootstrap/util.go index 6d6ffb3598..7ea8f13841 100644 --- a/cmd/api/src/bootstrap/util.go +++ b/cmd/api/src/bootstrap/util.go @@ -22,12 +22,12 @@ import ( "log/slog" "os" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs" "github.com/specterops/bloodhound/dawgs/drivers/neo4j" "github.com/specterops/bloodhound/dawgs/drivers/pg" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/util/size" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api/tools" "github.com/specterops/bloodhound/src/config" ) @@ -107,17 +107,17 @@ func ConnectGraph(ctx context.Context, cfg config.Configuration) (*graph.Databas // InitializeLogging sets up output file logging, and returns errors if any func InitializeLogging(cfg config.Configuration) error { - var logLevel = log.LevelInfo + var logLevel = bhlog.LevelInfo if cfg.LogLevel != "" { - if parsedLevel, err := log.ParseLevel(cfg.LogLevel); err != nil { + if parsedLevel, err := bhlog.ParseLevel(cfg.LogLevel); err != nil { return err } else { logLevel = parsedLevel } } - log.Configure(log.DefaultConfiguration().WithLevel(logLevel)) + 
bhlog.Configure(bhlog.DefaultConfiguration().WithLevel(logLevel)) slog.Info("Logging configured") return nil diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index eaa3e6e7f0..00c08517dd 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -23,8 +23,8 @@ import ( "log/slog" "os" + "github.com/specterops/bloodhound/bhlog/handlers" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log/handlers" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" diff --git a/cmd/api/src/cmd/dawgs-harness/main.go b/cmd/api/src/cmd/dawgs-harness/main.go index a8aa18a56f..df1c0abe97 100644 --- a/cmd/api/src/cmd/dawgs-harness/main.go +++ b/cmd/api/src/cmd/dawgs-harness/main.go @@ -33,9 +33,9 @@ import ( schema "github.com/specterops/bloodhound/graphschema" "github.com/jedib0t/go-pretty/v6/table" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/cmd/dawgs-harness/tests" ) @@ -126,7 +126,7 @@ func main() { flag.StringVar(&pgConnectionStr, "pg", "user=bhe dbname=bhe password=bhe4eva host=localhost", "PostgreSQL connection string.") flag.Parse() - log.ConfigureDefaults() + bhlog.ConfigureDefaults() switch testType { case "both": diff --git a/cmd/api/src/daemons/api/bhapi/api.go b/cmd/api/src/daemons/api/bhapi/api.go index 9c299fdd6f..af7d03c2d4 100644 --- a/cmd/api/src/daemons/api/bhapi/api.go +++ b/cmd/api/src/daemons/api/bhapi/api.go @@ -23,7 +23,7 @@ import ( "log/slog" "net/http" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/src/config" ) @@ -40,7 +40,7 @@ func NewDaemon(cfg config.Configuration, handler http.Handler) Daemon { server: &http.Server{ Addr: cfg.BindAddress, Handler: 
handler, - ErrorLog: log.Adapter(log.LevelError, "BHAPI", 0), + ErrorLog: bhlog.Adapter(bhlog.LevelError, "BHAPI", 0), }, } } diff --git a/cmd/api/src/daemons/api/toolapi/api.go b/cmd/api/src/daemons/api/toolapi/api.go index 72de66be0c..ae032d8008 100644 --- a/cmd/api/src/daemons/api/toolapi/api.go +++ b/cmd/api/src/daemons/api/toolapi/api.go @@ -26,8 +26,8 @@ import ( "github.com/go-chi/chi/v5" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/api/tools" "github.com/specterops/bloodhound/src/bootstrap" @@ -101,7 +101,7 @@ func NewDaemon[DBType database.Database](ctx context.Context, connections bootst server: &http.Server{ Addr: cfg.MetricsPort, Handler: router, - ErrorLog: log.Adapter(log.LevelError, "ToolAPI", 0), + ErrorLog: bhlog.Adapter(bhlog.LevelError, "ToolAPI", 0), }, } } diff --git a/cmd/api/src/daemons/datapipe/agi.go b/cmd/api/src/daemons/datapipe/agi.go index 2a8acbb9a2..533cb3c1b4 100644 --- a/cmd/api/src/daemons/datapipe/agi.go +++ b/cmd/api/src/daemons/datapipe/agi.go @@ -22,11 +22,10 @@ import ( "log/slog" "sync" - "github.com/specterops/bloodhound/log/measure" - commonanalysis "github.com/specterops/bloodhound/analysis" adAnalysis "github.com/specterops/bloodhound/analysis/ad" azureAnalysis "github.com/specterops/bloodhound/analysis/azure" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 3fc7e159a0..82279d28ae 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -23,9 +23,9 @@ import ( "log/slog" "time" + 
"github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/cache" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" "github.com/specterops/bloodhound/src/database" diff --git a/cmd/api/src/database/log.go b/cmd/api/src/database/log.go index 24ab256a40..a0eb672d8c 100644 --- a/cmd/api/src/database/log.go +++ b/cmd/api/src/database/log.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" - "github.com/specterops/bloodhound/log/handlers" "log/slog" "time" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/handlers" "gorm.io/gorm" "gorm.io/gorm/logger" ) @@ -52,7 +52,7 @@ func (s *GormLogAdapter) Error(ctx context.Context, msg string, data ...any) { } func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) { - if log.GlobalLevel() > log.LevelDebug { + if bhlog.GlobalLevel() > bhlog.LevelDebug { return } @@ -78,7 +78,7 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( } else if elapsed >= s.SlowQueryWarnThreshold { sql, rows := fc() - if log.GlobalAccepts(log.LevelDebug) { + if bhlog.GlobalAccepts(bhlog.LevelDebug) { slog.WarnContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "nums_rows", rows, "sql", sql, handlers.GetSlogCallStack()) } else { slog.WarnContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "num_rows", rows) diff --git a/cmd/api/src/database/log_test.go b/cmd/api/src/database/log_test.go index b3163a99ec..39c7c00f90 100644 --- a/cmd/api/src/database/log_test.go +++ b/cmd/api/src/database/log_test.go @@ -17,27 +17,28 @@ package database_test import ( + "bytes" + "log/slog" + "strings" "testing" "time" - "github.com/specterops/bloodhound/log" - "github.com/specterops/bloodhound/log/mocks" 
"github.com/specterops/bloodhound/src/database"
-	"go.uber.org/mock/gomock"
 )
 
 func TestGormLogAdapter_Info(t *testing.T) {
 	var (
-		mockCtrl  = gomock.NewController(t)
-		mockEvent = mocks.NewMockEvent(mockCtrl)
 		gormLogAdapter = database.GormLogAdapter{
 			SlowQueryWarnThreshold:  time.Minute,
 			SlowQueryErrorThreshold: time.Minute,
 		}
 	)
 
-	log.ConfigureDefaults()
+	var buf bytes.Buffer
+	slog.SetDefault(slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{})))
 
-	mockEvent.EXPECT().Msgf("message %d %s %f", 1, "arg", 2.0).Times(1)
-	gormLogAdapter.Log(mockEvent, "message %d %s %f", 1, "arg", 2.0)
+	gormLogAdapter.Info(nil, "message", "data", 1, "data2", "arg", "data3", 2.0)
+	if !strings.Contains(buf.String(), `msg=message data=1 data2=arg data3=2`) {
+		t.Error("failed to properly log through gorm adapter")
+	}
 }
diff --git a/cmd/api/src/migrations/manifest.go b/cmd/api/src/migrations/manifest.go
index ddaa4f0e2b..c63d7cca39 100644
--- a/cmd/api/src/migrations/manifest.go
+++ b/cmd/api/src/migrations/manifest.go
@@ -25,13 +25,13 @@ import (
 	"time"
 
 	"github.com/specterops/bloodhound/analysis"
+	"github.com/specterops/bloodhound/bhlog/measure"
 	"github.com/specterops/bloodhound/dawgs/graph"
 	"github.com/specterops/bloodhound/dawgs/ops"
 	"github.com/specterops/bloodhound/dawgs/query"
 	"github.com/specterops/bloodhound/graphschema/ad"
 	"github.com/specterops/bloodhound/graphschema/azure"
 	"github.com/specterops/bloodhound/graphschema/common"
-	"github.com/specterops/bloodhound/log/measure"
 	"github.com/specterops/bloodhound/src/version"
 )
diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go
index c6dac7fd9f..ac15e65a56 100644
--- a/cmd/api/src/queries/graph.go
+++ b/cmd/api/src/queries/graph.go
@@ -34,6 +34,8 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/specterops/bloodhound/analysis"
+	"github.com/specterops/bloodhound/bhlog"
+	"github.com/specterops/bloodhound/bhlog/measure"
 	"github.com/specterops/bloodhound/cache"
"github.com/specterops/bloodhound/cypher/analyzer" "github.com/specterops/bloodhound/cypher/frontend" @@ -45,8 +47,6 @@ import ( "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" - "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/api/bloodhoundgraph" "github.com/specterops/bloodhound/src/config" bhCtx "github.com/specterops/bloodhound/src/ctx" @@ -398,7 +398,7 @@ func (s *GraphQuery) PrepareCypherQuery(rawCypher string) (PreparedQuery, error) return graphQuery, err } else if !s.DisableCypherComplexityLimit && complexityMeasure.Weight > MaxQueryComplexityWeightAllowed { // log query details if it is rejected due to high complexity - highComplexityLog := log.WithLevel(log.LevelError) + highComplexityLog := bhlog.WithLevel(bhlog.LevelError) highComplexityLog.Str("query", strippedQueryBuffer.String()) highComplexityLog.Msg(fmt.Sprintf("Query rejected. Query weight: %d. 
Maximum allowed weight: %d", complexityMeasure.Weight, MaxQueryComplexityWeightAllowed)) @@ -457,7 +457,7 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i var reductionFactor int64 availableRuntime, reductionFactor = applyTimeoutReduction(pQuery.complexity.Weight, availableRuntime) - logEvent := log.WithLevel(log.LevelInfo) + logEvent := bhlog.WithLevel(bhlog.LevelInfo) logEvent.Str("query", pQuery.StrippedQuery) logEvent.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) logEvent.Str("reduction factor", strconv.FormatInt(reductionFactor, 10)) @@ -480,7 +480,7 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i runtime := time.Since(start) - logEvent := log.WithLevel(log.LevelInfo) + logEvent := bhlog.WithLevel(bhlog.LevelInfo) logEvent.Str("query", pQuery.StrippedQuery) logEvent.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) logEvent.Msg(fmt.Sprintf("Executed user cypher query with cost %d in %.2f seconds", pQuery.complexity.Weight, runtime.Seconds())) @@ -488,7 +488,7 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i if err != nil { // Log query details if neo4j times out if util.IsNeoTimeoutError(err) { - timeoutLog := log.WithLevel(log.LevelError) + timeoutLog := bhlog.WithLevel(bhlog.LevelError) timeoutLog.Str("query", pQuery.StrippedQuery) timeoutLog.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) timeoutLog.Msg("Neo4j timed out while executing cypher query") diff --git a/cmd/api/src/services/agi/agi.go b/cmd/api/src/services/agi/agi.go index d5184c163f..5ddfe1d2e5 100644 --- a/cmd/api/src/services/agi/agi.go +++ b/cmd/api/src/services/agi/agi.go @@ -24,9 +24,8 @@ import ( "slices" "strings" - "github.com/specterops/bloodhound/log/measure" - "github.com/specterops/bloodhound/analysis" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" 
"github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" diff --git a/cmd/api/src/services/dataquality/dataquality.go b/cmd/api/src/services/dataquality/dataquality.go index 14f2b5f2d0..854255a788 100644 --- a/cmd/api/src/services/dataquality/dataquality.go +++ b/cmd/api/src/services/dataquality/dataquality.go @@ -22,8 +22,8 @@ import ( "fmt" "log/slog" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/src/analysis/ad" "github.com/specterops/bloodhound/src/analysis/azure" "github.com/specterops/bloodhound/src/model" diff --git a/go.work b/go.work index ec312ddef2..b24dd8e828 100644 --- a/go.work +++ b/go.work @@ -19,6 +19,7 @@ go 1.23 use ( ./cmd/api/src ./packages/go/analysis + ./packages/go/bhlog ./packages/go/bomenc ./packages/go/cache ./packages/go/conftool @@ -29,7 +30,6 @@ use ( ./packages/go/graphschema ./packages/go/headers ./packages/go/lab - ./packages/go/log ./packages/go/mediatypes ./packages/go/openapi ./packages/go/params diff --git a/packages/go/analysis/ad/ad.go b/packages/go/analysis/ad/ad.go index 56784b57cc..550f00d252 100644 --- a/packages/go/analysis/ad/ad.go +++ b/packages/go/analysis/ad/ad.go @@ -24,11 +24,9 @@ import ( "strings" "time" - "github.com/specterops/bloodhound/log/measure" - "github.com/specterops/bloodhound/analysis/impact" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" - "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" diff --git a/packages/go/analysis/ad/membership.go b/packages/go/analysis/ad/membership.go index 5024c8ae18..4e17476ab8 100644 --- a/packages/go/analysis/ad/membership.go +++ b/packages/go/analysis/ad/membership.go @@ -23,13 +23,13 @@ import ( "github.com/specterops/bloodhound/analysis" 
"github.com/specterops/bloodhound/analysis/impact" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/graphschema/ad" - "github.com/specterops/bloodhound/log/measure" ) func ResolveAllGroupMemberships(ctx context.Context, db graph.Database, additionalCriteria ...graph.Criteria) (impact.PathAggregator, error) { diff --git a/packages/go/analysis/ad/queries.go b/packages/go/analysis/ad/queries.go index 9afc3b5bd1..20ff3de7bb 100644 --- a/packages/go/analysis/ad/queries.go +++ b/packages/go/analysis/ad/queries.go @@ -25,6 +25,7 @@ import ( "github.com/RoaringBitmap/roaring/roaring64" "github.com/specterops/bloodhound/analysis" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/graphcache" @@ -33,7 +34,6 @@ import ( "github.com/specterops/bloodhound/dawgs/traversal" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log/measure" ) func FetchGraphDBTierZeroTaggedAssets(ctx context.Context, db graph.Database, domainSID string) (graph.NodeSet, error) { diff --git a/packages/go/analysis/analysis.go b/packages/go/analysis/analysis.go index af375e65cf..b7d3d64409 100644 --- a/packages/go/analysis/analysis.go +++ b/packages/go/analysis/analysis.go @@ -22,13 +22,13 @@ import ( "log/slog" "slices" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/ad" 
"github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log/measure" "github.com/specterops/bloodhound/slicesext" ) diff --git a/packages/go/analysis/azure/queries.go b/packages/go/analysis/azure/queries.go index 0a9f5dd8c4..acf08b0314 100644 --- a/packages/go/analysis/azure/queries.go +++ b/packages/go/analysis/azure/queries.go @@ -23,13 +23,13 @@ import ( "strings" "github.com/RoaringBitmap/roaring/roaring64" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/ad" "github.com/specterops/bloodhound/graphschema/azure" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log/measure" ) func FetchCollectedTenants(tx graph.Transaction) (graph.NodeSet, error) { diff --git a/packages/go/analysis/azure/role.go b/packages/go/analysis/azure/role.go index cb384bcaca..39752394b9 100644 --- a/packages/go/analysis/azure/role.go +++ b/packages/go/analysis/azure/role.go @@ -22,12 +22,12 @@ import ( "log/slog" "slices" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/azure" - "github.com/specterops/bloodhound/log/measure" ) func NewRoleEntityDetails(node *graph.Node) RoleDetails { diff --git a/packages/go/analysis/azure/tenant.go b/packages/go/analysis/azure/tenant.go index 81ffa77468..e19270d4b6 100644 --- a/packages/go/analysis/azure/tenant.go +++ b/packages/go/analysis/azure/tenant.go @@ -21,11 +21,11 @@ import ( "fmt" "log/slog" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" 
"github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/graphschema/azure" - "github.com/specterops/bloodhound/log/measure" ) func NewTenantEntityDetails(node *graph.Node) TenantDetails { diff --git a/packages/go/analysis/impact/aggregator.go b/packages/go/analysis/impact/aggregator.go index 2bd78fa8fa..84d7809e20 100644 --- a/packages/go/analysis/impact/aggregator.go +++ b/packages/go/analysis/impact/aggregator.go @@ -20,8 +20,7 @@ import ( "fmt" "log/slog" - "github.com/specterops/bloodhound/log/measure" - + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" ) diff --git a/packages/go/analysis/impact/id_aggregator.go b/packages/go/analysis/impact/id_aggregator.go index 99676c6fa4..d76dea3579 100644 --- a/packages/go/analysis/impact/id_aggregator.go +++ b/packages/go/analysis/impact/id_aggregator.go @@ -21,9 +21,9 @@ import ( "log/slog" "sync" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/log/measure" ) type PathAggregator interface { diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index 133f769100..6bc35b2477 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -22,13 +22,13 @@ import ( "log/slog" "sort" + "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/dawgs/query" "github.com/specterops/bloodhound/dawgs/util/channels" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" - "github.com/specterops/bloodhound/log/measure" ) func statsSortedKeys(value map[graph.Kind]int) []graph.Kind { @@ -91,7 +91,7 @@ 
func (s PostProcessingStats) Merge(other PostProcessingStats) { func (s PostProcessingStats) LogStats() { // Only output stats during debug runs - if log.GlobalLevel() > log.LevelDebug { + if bhlog.GlobalLevel() > bhlog.LevelDebug { return } diff --git a/packages/go/analysis/post_operation.go b/packages/go/analysis/post_operation.go index 3f393100fe..73d5b59f80 100644 --- a/packages/go/analysis/post_operation.go +++ b/packages/go/analysis/post_operation.go @@ -24,11 +24,11 @@ import ( "sync/atomic" "time" + "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" "github.com/specterops/bloodhound/graphschema/common" - "github.com/specterops/bloodhound/log" - "github.com/specterops/bloodhound/log/measure" ) type StatTrackedOperation[T any] struct { @@ -132,7 +132,7 @@ func (s *AtomicPostProcessingStats) Merge(other *AtomicPostProcessingStats) { func (s *AtomicPostProcessingStats) LogStats() { // Only output stats during debug runs - if log.GlobalLevel() > log.LevelDebug { + if bhlog.GlobalLevel() > bhlog.LevelDebug { return } diff --git a/packages/go/log/cmd/logtest/main.go b/packages/go/bhlog/cmd/logtest/main.go similarity index 100% rename from packages/go/log/cmd/logtest/main.go rename to packages/go/bhlog/cmd/logtest/main.go diff --git a/packages/go/log/config.go b/packages/go/bhlog/config.go similarity index 99% rename from packages/go/log/config.go rename to packages/go/bhlog/config.go index 8b79686288..1c1c630729 100644 --- a/packages/go/log/config.go +++ b/packages/go/bhlog/config.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package log +package bhlog import ( "time" diff --git a/packages/go/log/event.go b/packages/go/bhlog/event.go similarity index 99% rename from packages/go/log/event.go rename to packages/go/bhlog/event.go index f341ffcd54..7b86953386 100644 --- a/packages/go/log/event.go +++ 
b/packages/go/bhlog/event.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package log +package bhlog //go:generate go run go.uber.org/mock/mockgen -copyright_file=../../../LICENSE.header -destination=./mocks/event.go -package=mocks . Event diff --git a/packages/go/log/go.mod b/packages/go/bhlog/go.mod similarity index 94% rename from packages/go/log/go.mod rename to packages/go/bhlog/go.mod index e3d1d6b2b0..6682d3ecfe 100644 --- a/packages/go/log/go.mod +++ b/packages/go/bhlog/go.mod @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -module github.com/specterops/bloodhound/log +module github.com/specterops/bloodhound/bhlog go 1.23 diff --git a/packages/go/log/go.sum b/packages/go/bhlog/go.sum similarity index 100% rename from packages/go/log/go.sum rename to packages/go/bhlog/go.sum diff --git a/packages/go/log/golog.go b/packages/go/bhlog/golog.go similarity index 98% rename from packages/go/log/golog.go rename to packages/go/bhlog/golog.go index 903c928db0..70128d4c73 100644 --- a/packages/go/log/golog.go +++ b/packages/go/bhlog/golog.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package log +package bhlog import golog "log" diff --git a/packages/go/log/handlers/handlers.go b/packages/go/bhlog/handlers/handlers.go similarity index 100% rename from packages/go/log/handlers/handlers.go rename to packages/go/bhlog/handlers/handlers.go diff --git a/packages/go/log/log.go b/packages/go/bhlog/log.go similarity index 99% rename from packages/go/log/log.go rename to packages/go/bhlog/log.go index b0f1450008..3914748561 100644 --- a/packages/go/log/log.go +++ b/packages/go/bhlog/log.go @@ -14,7 +14,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package log +package bhlog import ( "fmt" diff --git a/packages/go/log/measure/measure.go b/packages/go/bhlog/measure/measure.go similarity index 100% rename from packages/go/log/measure/measure.go rename to packages/go/bhlog/measure/measure.go diff --git 
a/packages/go/log/mocks/event.go b/packages/go/bhlog/mocks/event.go similarity index 86% rename from packages/go/log/mocks/event.go rename to packages/go/bhlog/mocks/event.go index a90f3bba83..c11c43893c 100644 --- a/packages/go/log/mocks/event.go +++ b/packages/go/bhlog/mocks/event.go @@ -25,7 +25,7 @@ import ( reflect "reflect" time "time" - log "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" gomock "go.uber.org/mock/gomock" ) @@ -53,10 +53,10 @@ func (m *MockEvent) EXPECT() *MockEventMockRecorder { } // Any mocks base method. -func (m *MockEvent) Any(arg0 string, arg1 interface{}) log.Event { +func (m *MockEvent) Any(arg0 string, arg1 interface{}) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Any", arg0, arg1) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -67,14 +67,14 @@ func (mr *MockEventMockRecorder) Any(arg0, arg1 interface{}) *gomock.Call { } // Bool mocks base method. -func (m *MockEvent) Bool(arg0 string, arg1 ...bool) log.Event { +func (m *MockEvent) Bool(arg0 string, arg1 ...bool) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Bool", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -86,10 +86,10 @@ func (mr *MockEventMockRecorder) Bool(arg0 interface{}, arg1 ...interface{}) *go } // Bytes mocks base method. -func (m *MockEvent) Bytes(arg0 string, arg1 []byte) log.Event { +func (m *MockEvent) Bytes(arg0 string, arg1 []byte) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Bytes", arg0, arg1) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -100,14 +100,14 @@ func (mr *MockEventMockRecorder) Bytes(arg0, arg1 interface{}) *gomock.Call { } // Caller mocks base method. 
-func (m *MockEvent) Caller(arg0 ...int) log.Event { +func (m *MockEvent) Caller(arg0 ...int) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{} for _, a := range arg0 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Caller", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -118,10 +118,10 @@ func (mr *MockEventMockRecorder) Caller(arg0 ...interface{}) *gomock.Call { } // Discard mocks base method. -func (m *MockEvent) Discard() log.Event { +func (m *MockEvent) Discard() bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Discard") - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -132,14 +132,14 @@ func (mr *MockEventMockRecorder) Discard() *gomock.Call { } // Duration mocks base method. -func (m *MockEvent) Duration(arg0 string, arg1 ...time.Duration) log.Event { +func (m *MockEvent) Duration(arg0 string, arg1 ...time.Duration) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Duration", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -165,14 +165,14 @@ func (mr *MockEventMockRecorder) Enabled() *gomock.Call { } // Fault mocks base method. -func (m *MockEvent) Fault(arg0 ...error) log.Event { +func (m *MockEvent) Fault(arg0 ...error) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{} for _, a := range arg0 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Fault", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -183,14 +183,14 @@ func (mr *MockEventMockRecorder) Fault(arg0 ...interface{}) *gomock.Call { } // Float32 mocks base method. 
-func (m *MockEvent) Float32(arg0 string, arg1 ...float32) log.Event { +func (m *MockEvent) Float32(arg0 string, arg1 ...float32) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Float32", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -202,14 +202,14 @@ func (mr *MockEventMockRecorder) Float32(arg0 interface{}, arg1 ...interface{}) } // Float64 mocks base method. -func (m *MockEvent) Float64(arg0 string, arg1 ...float64) log.Event { +func (m *MockEvent) Float64(arg0 string, arg1 ...float64) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Float64", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -221,10 +221,10 @@ func (mr *MockEventMockRecorder) Float64(arg0 interface{}, arg1 ...interface{}) } // Hex mocks base method. -func (m *MockEvent) Hex(arg0 string, arg1 []byte) log.Event { +func (m *MockEvent) Hex(arg0 string, arg1 []byte) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Hex", arg0, arg1) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -235,10 +235,10 @@ func (mr *MockEventMockRecorder) Hex(arg0, arg1 interface{}) *gomock.Call { } // IPAddr mocks base method. -func (m *MockEvent) IPAddr(arg0 string, arg1 net.IP) log.Event { +func (m *MockEvent) IPAddr(arg0 string, arg1 net.IP) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IPAddr", arg0, arg1) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -249,10 +249,10 @@ func (mr *MockEventMockRecorder) IPAddr(arg0, arg1 interface{}) *gomock.Call { } // IPPrefix mocks base method. 
-func (m *MockEvent) IPPrefix(arg0 string, arg1 net.IPNet) log.Event { +func (m *MockEvent) IPPrefix(arg0 string, arg1 net.IPNet) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IPPrefix", arg0, arg1) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -263,14 +263,14 @@ func (mr *MockEventMockRecorder) IPPrefix(arg0, arg1 interface{}) *gomock.Call { } // Int mocks base method. -func (m *MockEvent) Int(arg0 string, arg1 ...int) log.Event { +func (m *MockEvent) Int(arg0 string, arg1 ...int) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Int", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -282,14 +282,14 @@ func (mr *MockEventMockRecorder) Int(arg0 interface{}, arg1 ...interface{}) *gom } // Int16 mocks base method. -func (m *MockEvent) Int16(arg0 string, arg1 ...int16) log.Event { +func (m *MockEvent) Int16(arg0 string, arg1 ...int16) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Int16", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -301,14 +301,14 @@ func (mr *MockEventMockRecorder) Int16(arg0 interface{}, arg1 ...interface{}) *g } // Int32 mocks base method. -func (m *MockEvent) Int32(arg0 string, arg1 ...int32) log.Event { +func (m *MockEvent) Int32(arg0 string, arg1 ...int32) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Int32", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -320,14 +320,14 @@ func (mr *MockEventMockRecorder) Int32(arg0 interface{}, arg1 ...interface{}) *g } // Int64 mocks base method. 
-func (m *MockEvent) Int64(arg0 string, arg1 ...int64) log.Event { +func (m *MockEvent) Int64(arg0 string, arg1 ...int64) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Int64", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -339,14 +339,14 @@ func (mr *MockEventMockRecorder) Int64(arg0 interface{}, arg1 ...interface{}) *g } // Int8 mocks base method. -func (m *MockEvent) Int8(arg0 string, arg1 ...int8) log.Event { +func (m *MockEvent) Int8(arg0 string, arg1 ...int8) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Int8", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -358,10 +358,10 @@ func (mr *MockEventMockRecorder) Int8(arg0 interface{}, arg1 ...interface{}) *go } // MACAddr mocks base method. -func (m *MockEvent) MACAddr(arg0 string, arg1 net.HardwareAddr) log.Event { +func (m *MockEvent) MACAddr(arg0 string, arg1 net.HardwareAddr) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MACAddr", arg0, arg1) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -413,10 +413,10 @@ func (mr *MockEventMockRecorder) Send() *gomock.Call { } // Stack mocks base method. -func (m *MockEvent) Stack() log.Event { +func (m *MockEvent) Stack() bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Stack") - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -427,14 +427,14 @@ func (mr *MockEventMockRecorder) Stack() *gomock.Call { } // Str mocks base method. -func (m *MockEvent) Str(arg0 string, arg1 ...string) log.Event { +func (m *MockEvent) Str(arg0 string, arg1 ...string) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Str", varargs...) 
- ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -446,14 +446,14 @@ func (mr *MockEventMockRecorder) Str(arg0 interface{}, arg1 ...interface{}) *gom } // Time mocks base method. -func (m *MockEvent) Time(arg0 string, arg1 ...time.Time) log.Event { +func (m *MockEvent) Time(arg0 string, arg1 ...time.Time) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Time", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -465,10 +465,10 @@ func (mr *MockEventMockRecorder) Time(arg0 interface{}, arg1 ...interface{}) *go } // TimeDiff mocks base method. -func (m *MockEvent) TimeDiff(arg0 string, arg1, arg2 time.Time) log.Event { +func (m *MockEvent) TimeDiff(arg0 string, arg1, arg2 time.Time) bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TimeDiff", arg0, arg1, arg2) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -479,10 +479,10 @@ func (mr *MockEventMockRecorder) TimeDiff(arg0, arg1, arg2 interface{}) *gomock. } // Timestamp mocks base method. -func (m *MockEvent) Timestamp() log.Event { +func (m *MockEvent) Timestamp() bhlog.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Timestamp") - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -493,14 +493,14 @@ func (mr *MockEventMockRecorder) Timestamp() *gomock.Call { } // Uint mocks base method. -func (m *MockEvent) Uint(arg0 string, arg1 ...uint) log.Event { +func (m *MockEvent) Uint(arg0 string, arg1 ...uint) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Uint", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -512,14 +512,14 @@ func (mr *MockEventMockRecorder) Uint(arg0 interface{}, arg1 ...interface{}) *go } // Uint16 mocks base method. 
-func (m *MockEvent) Uint16(arg0 string, arg1 ...uint16) log.Event { +func (m *MockEvent) Uint16(arg0 string, arg1 ...uint16) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Uint16", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -531,14 +531,14 @@ func (mr *MockEventMockRecorder) Uint16(arg0 interface{}, arg1 ...interface{}) * } // Uint32 mocks base method. -func (m *MockEvent) Uint32(arg0 string, arg1 ...uint32) log.Event { +func (m *MockEvent) Uint32(arg0 string, arg1 ...uint32) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Uint32", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -550,14 +550,14 @@ func (mr *MockEventMockRecorder) Uint32(arg0 interface{}, arg1 ...interface{}) * } // Uint64 mocks base method. -func (m *MockEvent) Uint64(arg0 string, arg1 ...uint64) log.Event { +func (m *MockEvent) Uint64(arg0 string, arg1 ...uint64) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Uint64", varargs...) - ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } @@ -569,14 +569,14 @@ func (mr *MockEventMockRecorder) Uint64(arg0 interface{}, arg1 ...interface{}) * } // Uint8 mocks base method. -func (m *MockEvent) Uint8(arg0 string, arg1 ...byte) log.Event { +func (m *MockEvent) Uint8(arg0 string, arg1 ...byte) bhlog.Event { m.ctrl.T.Helper() varargs := []interface{}{arg0} for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Uint8", varargs...) 
- ret0, _ := ret[0].(log.Event) + ret0, _ := ret[0].(bhlog.Event) return ret0 } diff --git a/packages/go/dawgs/go.mod b/packages/go/dawgs/go.mod index 368f2c5e72..fef1916000 100644 --- a/packages/go/dawgs/go.mod +++ b/packages/go/dawgs/go.mod @@ -26,7 +26,7 @@ require ( github.com/jackc/pgx/v5 v5.7.1 github.com/neo4j/neo4j-go-driver/v5 v5.9.0 github.com/specterops/bloodhound/cypher v0.0.0-00010101000000-000000000000 - github.com/specterops/bloodhound/log v0.0.0-00010101000000-000000000000 + github.com/specterops/bloodhound/bhlog v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.2.0 ) @@ -57,5 +57,5 @@ require ( replace ( github.com/specterops/bloodhound/cypher => ../cypher - github.com/specterops/bloodhound/log => ../log + github.com/specterops/bloodhound/bhlog => ../bhlog ) diff --git a/packages/go/dawgs/ops/traversal.go b/packages/go/dawgs/ops/traversal.go index 7838d12645..7bf6528db3 100644 --- a/packages/go/dawgs/ops/traversal.go +++ b/packages/go/dawgs/ops/traversal.go @@ -22,9 +22,9 @@ import ( "log/slog" "github.com/RoaringBitmap/roaring/roaring64" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/query" - "github.com/specterops/bloodhound/log/measure" ) type LimitSkipTracker struct { diff --git a/packages/go/dawgs/traversal/traversal.go b/packages/go/dawgs/traversal/traversal.go index ee15b5edc5..522edc6d88 100644 --- a/packages/go/dawgs/traversal/traversal.go +++ b/packages/go/dawgs/traversal/traversal.go @@ -24,6 +24,7 @@ import ( "sync" "sync/atomic" + "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/cardinality" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/graphcache" @@ -32,7 +33,6 @@ import ( "github.com/specterops/bloodhound/dawgs/util" "github.com/specterops/bloodhound/dawgs/util/atomics" "github.com/specterops/bloodhound/dawgs/util/channels" 
-	"github.com/specterops/bloodhound/log/measure"
 )
 
 // Driver is a function that drives sending queries to the graph and retrieving vertexes and edges. Traversal
diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go
index 7a6d875597..db38add244 100644
--- a/packages/go/schemagen/main.go
+++ b/packages/go/schemagen/main.go
@@ -23,7 +23,7 @@ import (
 	"path/filepath"
 
 	"cuelang.org/go/cue/errors"
-	"github.com/specterops/bloodhound/log"
+	"github.com/specterops/bloodhound/bhlog"
 	"github.com/specterops/bloodhound/schemagen/generator"
 	"github.com/specterops/bloodhound/schemagen/model"
 	"github.com/specterops/bloodhound/schemagen/tsgen"
@@ -70,7 +70,7 @@ func GenerateSharedTypeScript(projectRoot string, rootSchema Schema) error {
 }
 
 func main() {
-	log.Configure(log.DefaultConfiguration().WithLevel(log.LevelDebug))
+	bhlog.Configure(bhlog.DefaultConfiguration().WithLevel(bhlog.LevelDebug))
 
 	cfgBuilder := generator.NewConfigBuilder("/schemas")
 
diff --git a/packages/go/stbernard/cmdrunner/cmdrunner.go b/packages/go/stbernard/cmdrunner/cmdrunner.go
index 999fffb78d..2faf2a3bb4 100644
--- a/packages/go/stbernard/cmdrunner/cmdrunner.go
+++ b/packages/go/stbernard/cmdrunner/cmdrunner.go
@@ -19,12 +19,12 @@ package cmdrunner
 
 import (
 	"errors"
 	"fmt"
 	"log/slog"
 	"os"
 	"os/exec"
 	"strings"
 
-	"github.com/specterops/bloodhound/log"
+	"github.com/specterops/bloodhound/bhlog"
 	"github.com/specterops/bloodhound/packages/go/stbernard/environment"
 )
@@ -43,7 +43,7 @@ func Run(command string, args []string, path string, env environment.Environment
 		cmdstr = command + " " + args[0]
 		cmd    = exec.Command(command, args...)
- debugEnabled = log.GlobalAccepts(log.LevelDebug) + debugEnabled = bhlog.GlobalAccepts(bhlog.LevelDebug) ) cmd.Env = env.Slice() diff --git a/packages/go/stbernard/command/command.go b/packages/go/stbernard/command/command.go index 82240bb3a4..285400e0b6 100644 --- a/packages/go/stbernard/command/command.go +++ b/packages/go/stbernard/command/command.go @@ -23,7 +23,7 @@ import ( "os" "strings" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/packages/go/stbernard/command/analysis" "github.com/specterops/bloodhound/packages/go/stbernard/command/builder" "github.com/specterops/bloodhound/packages/go/stbernard/command/cover" @@ -139,11 +139,11 @@ func ParseCLI(env environment.Environment) (CommandRunner, error) { } if *verboseEnabled { - log.SetGlobalLevel(log.LevelInfo) + bhlog.SetGlobalLevel(bhlog.LevelInfo) } if *debugEnabled { - log.SetGlobalLevel(log.LevelDebug) + bhlog.SetGlobalLevel(bhlog.LevelDebug) } return currentCmd, currentCmd.Parse(cmdStartIdx) diff --git a/packages/go/stbernard/git/git.go b/packages/go/stbernard/git/git.go index 4264dfccb8..61556cb1e2 100644 --- a/packages/go/stbernard/git/git.go +++ b/packages/go/stbernard/git/git.go @@ -28,7 +28,7 @@ import ( "strings" "github.com/Masterminds/semver/v3" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" "github.com/specterops/bloodhound/packages/go/stbernard/environment" ) @@ -77,7 +77,7 @@ func CheckClean(cwd string, env environment.Environment) (bool, error) { cmd.Env = env.Slice() cmd.Dir = cwd - if log.GlobalAccepts(log.LevelDebug) { + if bhlog.GlobalAccepts(bhlog.LevelDebug) { cmd.Stderr = os.Stderr } @@ -168,7 +168,7 @@ func getAllVersionTags(cwd string, env environment.Environment) ([]string, error cmd.Dir = cwd cmd.Stdout = &output - if log.GlobalAccepts(log.LevelDebug) { + if bhlog.GlobalAccepts(bhlog.LevelDebug) { cmd.Stderr 
= os.Stderr } diff --git a/packages/go/stbernard/go.mod b/packages/go/stbernard/go.mod index 1eaa44b462..101c85e0ee 100644 --- a/packages/go/stbernard/go.mod +++ b/packages/go/stbernard/go.mod @@ -21,7 +21,7 @@ go 1.23 require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/gofrs/uuid v4.4.0+incompatible - github.com/specterops/bloodhound/log v0.0.0-00010101000000-000000000000 + github.com/specterops/bloodhound/bhlog v0.0.0-00010101000000-000000000000 github.com/specterops/bloodhound/slicesext v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.9.0 golang.org/x/mod v0.21.0 @@ -30,16 +30,12 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rs/zerolog v1.29.1 // indirect - golang.org/x/sys v0.28.0 // indirect golang.org/x/tools v0.26.0 gopkg.in/yaml.v3 v3.0.1 // indirect ) replace ( - github.com/specterops/bloodhound/log => ../log + github.com/specterops/bloodhound/bhlog => ../bhlog github.com/specterops/bloodhound/slicesext => ../slicesext ) diff --git a/packages/go/stbernard/go.sum b/packages/go/stbernard/go.sum index 6473633265..dd5da281d2 100644 --- a/packages/go/stbernard/go.sum +++ b/packages/go/stbernard/go.sum @@ -1,37 +1,24 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= -github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 160485bd5c..1feff87a6e 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -24,7 +24,7 @@ import ( "log/slog" "os" - "github.com/specterops/bloodhound/log" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/packages/go/stbernard/command" "github.com/specterops/bloodhound/packages/go/stbernard/environment" ) @@ -33,16 +33,16 @@ func main() { env := environment.NewEnvironment() var rawLvl = env[environment.LogLevelVarName] - log.ConfigureDefaults() + bhlog.ConfigureDefaults() if rawLvl == "" { rawLvl = "warn" } - if lvl, err := log.ParseLevel(rawLvl); err != nil { + if lvl, err := bhlog.ParseLevel(rawLvl); err != nil { slog.Error(fmt.Sprintf("Could not parse log level from %s: %v", 
environment.LogLevelVarName, err)) } else { - log.SetGlobalLevel(lvl) + bhlog.SetGlobalLevel(lvl) } if cmd, err := command.ParseCLI(env); errors.Is(err, command.ErrNoCmd) { From 03e65d6e0c91fba26440c73d2b3c956ccdc7323c Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 16:59:03 -0500 Subject: [PATCH 14/20] BED-4153: Update gormlogadapter test --- cmd/api/src/database/log_test.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cmd/api/src/database/log_test.go b/cmd/api/src/database/log_test.go index 39c7c00f90..ea2c99c1ca 100644 --- a/cmd/api/src/database/log_test.go +++ b/cmd/api/src/database/log_test.go @@ -18,6 +18,8 @@ package database_test import ( "bytes" + "fmt" + "github.com/specterops/bloodhound/bhlog/handlers" "log/slog" "strings" "testing" @@ -35,10 +37,11 @@ func TestGormLogAdapter_Info(t *testing.T) { ) var buf bytes.Buffer - slog.SetDefault(slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{}))) + slog.SetDefault(slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr}))) - gormLogAdapter.Info(nil, "message", "data", 1, "data2", "arg", "data3", 2.0) - if !strings.Contains(buf.String(), `message=message data=1 data2="arg" data3=2.0`) { - t.Error("failed to properly log through gorm adapter") + expected := fmt.Sprintf(`message="message %d %s %f"`, 1, "arg", 2.0) + gormLogAdapter.Info(nil, "message %d %s %f", 1, "arg", 2.0) + if !strings.Contains(buf.String(), expected) { + t.Errorf("gormLogAdapter output does not contain expected\nOutput:%sExpected:%s", buf.String(), expected) } } From 2ccd71dbef322845d54c2b8b55e973382fab9079 Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 21:22:58 -0500 Subject: [PATCH 15/20] BED-4153: Remove all legacy log functionality --- cmd/api/src/api/tools/logging.go | 5 +- cmd/api/src/bootstrap/util.go | 7 +- cmd/api/src/daemons/api/bhapi/api.go | 2 +- 
cmd/api/src/daemons/api/toolapi/api.go | 2 +- cmd/api/src/database/log.go | 16 +- cmd/api/src/queries/graph.go | 36 +- packages/go/analysis/post.go | 4 +- packages/go/analysis/post_operation.go | 4 +- packages/go/bhlog/bhlog.go | 80 +++ packages/go/bhlog/cmd/logtest/main.go | 28 - packages/go/bhlog/config.go | 104 ---- packages/go/bhlog/event.go | 393 ------------- packages/go/bhlog/go.mod | 11 - packages/go/bhlog/go.sum | 20 - packages/go/bhlog/golog.go | 37 -- packages/go/bhlog/handlers/handlers.go | 55 +- packages/go/bhlog/level/level.go | 21 + packages/go/bhlog/log.go | 139 ----- packages/go/bhlog/mocks/event.go | 588 ------------------- packages/go/dawgs/go.mod | 8 +- packages/go/dawgs/go.sum | 19 - packages/go/stbernard/cmdrunner/cmdrunner.go | 4 +- packages/go/stbernard/command/command.go | 7 +- packages/go/stbernard/git/git.go | 6 +- packages/go/stbernard/main.go | 6 +- 25 files changed, 167 insertions(+), 1435 deletions(-) create mode 100644 packages/go/bhlog/bhlog.go delete mode 100644 packages/go/bhlog/cmd/logtest/main.go delete mode 100644 packages/go/bhlog/config.go delete mode 100644 packages/go/bhlog/event.go delete mode 100644 packages/go/bhlog/golog.go create mode 100644 packages/go/bhlog/level/level.go delete mode 100644 packages/go/bhlog/log.go delete mode 100644 packages/go/bhlog/mocks/event.go diff --git a/cmd/api/src/api/tools/logging.go b/cmd/api/src/api/tools/logging.go index 376c8acd7c..65987dbcea 100644 --- a/cmd/api/src/api/tools/logging.go +++ b/cmd/api/src/api/tools/logging.go @@ -20,6 +20,7 @@ import ( "net/http" "github.com/specterops/bloodhound/bhlog" + bhLevel "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/src/api" ) @@ -33,7 +34,7 @@ type LoggingLevel struct { func GetLoggingDetails(response http.ResponseWriter, request *http.Request) { api.WriteJSONResponse(request.Context(), LoggingLevel{ - Level: bhlog.GlobalLevel().String(), + Level: bhLevel.GlobalLevel().String(), }, http.StatusOK, response) } 
@@ -49,7 +50,7 @@ func PutLoggingDetails(response http.ResponseWriter, request *http.Request) { Error: err.Error(), }, http.StatusBadRequest, response) } else { - bhlog.SetGlobalLevel(level) + bhLevel.SetGlobalLevel(level) response.WriteHeader(http.StatusOK) } } diff --git a/cmd/api/src/bootstrap/util.go b/cmd/api/src/bootstrap/util.go index 7ea8f13841..a94c808eee 100644 --- a/cmd/api/src/bootstrap/util.go +++ b/cmd/api/src/bootstrap/util.go @@ -23,6 +23,7 @@ import ( "os" "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/dawgs" "github.com/specterops/bloodhound/dawgs/drivers/neo4j" "github.com/specterops/bloodhound/dawgs/drivers/pg" @@ -107,7 +108,7 @@ func ConnectGraph(ctx context.Context, cfg config.Configuration) (*graph.Databas // InitializeLogging sets up output file logging, and returns errors if any func InitializeLogging(cfg config.Configuration) error { - var logLevel = bhlog.LevelInfo + var logLevel = slog.LevelInfo if cfg.LogLevel != "" { if parsedLevel, err := bhlog.ParseLevel(cfg.LogLevel); err != nil { @@ -117,7 +118,9 @@ func InitializeLogging(cfg config.Configuration) error { } } - bhlog.Configure(bhlog.DefaultConfiguration().WithLevel(logLevel)) + logger := bhlog.NewDefaultLogger() + slog.SetDefault(logger) + level.SetGlobalLevel(logLevel) slog.Info("Logging configured") return nil diff --git a/cmd/api/src/daemons/api/bhapi/api.go b/cmd/api/src/daemons/api/bhapi/api.go index af7d03c2d4..b0ca20beae 100644 --- a/cmd/api/src/daemons/api/bhapi/api.go +++ b/cmd/api/src/daemons/api/bhapi/api.go @@ -40,7 +40,7 @@ func NewDaemon(cfg config.Configuration, handler http.Handler) Daemon { server: &http.Server{ Addr: cfg.BindAddress, Handler: handler, - ErrorLog: bhlog.Adapter(bhlog.LevelError, "BHAPI", 0), + ErrorLog: bhlog.NewLogLogger("BHAPI"), }, } } diff --git a/cmd/api/src/daemons/api/toolapi/api.go b/cmd/api/src/daemons/api/toolapi/api.go index ae032d8008..b727593e6c 100644 --- 
a/cmd/api/src/daemons/api/toolapi/api.go +++ b/cmd/api/src/daemons/api/toolapi/api.go @@ -101,7 +101,7 @@ func NewDaemon[DBType database.Database](ctx context.Context, connections bootst server: &http.Server{ Addr: cfg.MetricsPort, Handler: router, - ErrorLog: bhlog.Adapter(bhlog.LevelError, "ToolAPI", 0), + ErrorLog: bhlog.NewLogLogger("ToolAPI"), }, } } diff --git a/cmd/api/src/database/log.go b/cmd/api/src/database/log.go index a0eb672d8c..941619ef9e 100644 --- a/cmd/api/src/database/log.go +++ b/cmd/api/src/database/log.go @@ -24,7 +24,7 @@ import ( "time" "github.com/specterops/bloodhound/bhlog" - "github.com/specterops/bloodhound/bhlog/handlers" + "github.com/specterops/bloodhound/bhlog/level" "gorm.io/gorm" "gorm.io/gorm/logger" ) @@ -52,15 +52,15 @@ func (s *GormLogAdapter) Error(ctx context.Context, msg string, data ...any) { } func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) { - if bhlog.GlobalLevel() > bhlog.LevelDebug { + if !level.GlobalAccepts(slog.LevelDebug) { return } if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { sql, _ := fc() - if slog.Default().Enabled(ctx, slog.LevelDebug) { - slog.ErrorContext(ctx, "Database error", "query", sql, "error", err, handlers.GetSlogCallStack()) + if level.GlobalAccepts(slog.LevelDebug) { + slog.ErrorContext(ctx, "Database error", "query", sql, "error", err, bhlog.GetCallStack()) } else { slog.ErrorContext(ctx, "Database error", "query", sql, "error", err) } @@ -70,16 +70,16 @@ func (s *GormLogAdapter) Trace(ctx context.Context, begin time.Time, fc func() ( if elapsed >= s.SlowQueryErrorThreshold { sql, rows := fc() - if slog.Default().Enabled(ctx, slog.LevelDebug) { - slog.ErrorContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "nums_rows", rows, "sql", sql, handlers.GetSlogCallStack()) + if level.GlobalAccepts(slog.LevelDebug) { + slog.ErrorContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), 
"nums_rows", rows, "sql", sql, bhlog.GetCallStack()) } else { slog.ErrorContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "num_rows", rows) } } else if elapsed >= s.SlowQueryWarnThreshold { sql, rows := fc() - if bhlog.GlobalAccepts(bhlog.LevelDebug) { - slog.WarnContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "nums_rows", rows, "sql", sql, handlers.GetSlogCallStack()) + if level.GlobalAccepts(slog.LevelDebug) { + slog.WarnContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "nums_rows", rows, "sql", sql, bhlog.GetCallStack()) } else { slog.WarnContext(ctx, "Slow database query", "duration_ms", elapsed.Milliseconds(), "num_rows", rows) } diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index ac15e65a56..9bc5cb0703 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -34,7 +34,6 @@ import ( "github.com/gorilla/mux" "github.com/specterops/bloodhound/analysis" - "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/cache" "github.com/specterops/bloodhound/cypher/analyzer" @@ -398,9 +397,10 @@ func (s *GraphQuery) PrepareCypherQuery(rawCypher string) (PreparedQuery, error) return graphQuery, err } else if !s.DisableCypherComplexityLimit && complexityMeasure.Weight > MaxQueryComplexityWeightAllowed { // log query details if it is rejected due to high complexity - highComplexityLog := bhlog.WithLevel(bhlog.LevelError) - highComplexityLog.Str("query", strippedQueryBuffer.String()) - highComplexityLog.Msg(fmt.Sprintf("Query rejected. Query weight: %d. Maximum allowed weight: %d", complexityMeasure.Weight, MaxQueryComplexityWeightAllowed)) + slog.Error( + fmt.Sprintf("Query rejected. Query weight: %d. 
Maximum allowed weight: %d", complexityMeasure.Weight, MaxQueryComplexityWeightAllowed), + "query", strippedQueryBuffer.String(), + ) return graphQuery, ErrCypherQueryTooComplex } @@ -457,11 +457,12 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i var reductionFactor int64 availableRuntime, reductionFactor = applyTimeoutReduction(pQuery.complexity.Weight, availableRuntime) - logEvent := bhlog.WithLevel(bhlog.LevelInfo) - logEvent.Str("query", pQuery.StrippedQuery) - logEvent.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) - logEvent.Str("reduction factor", strconv.FormatInt(reductionFactor, 10)) - logEvent.Msg(fmt.Sprintf("Available timeout for query is set to: %.2f seconds", availableRuntime.Seconds())) + slog.Info( + fmt.Sprintf("Available timeout for query is set to: %.2f seconds", availableRuntime.Seconds()), + "query", pQuery.StrippedQuery, + "query cost", fmt.Sprintf("%d", pQuery.complexity.Weight), + "reduction factor", strconv.FormatInt(reductionFactor, 10), + ) } } @@ -480,18 +481,19 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i runtime := time.Since(start) - logEvent := bhlog.WithLevel(bhlog.LevelInfo) - logEvent.Str("query", pQuery.StrippedQuery) - logEvent.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) - logEvent.Msg(fmt.Sprintf("Executed user cypher query with cost %d in %.2f seconds", pQuery.complexity.Weight, runtime.Seconds())) + slog.Info( + fmt.Sprintf("Executed user cypher query with cost %d in %.2f seconds", pQuery.complexity.Weight, runtime.Seconds()), + "query", pQuery.StrippedQuery, + "query cost", fmt.Sprintf("%d", pQuery.complexity.Weight), + ) if err != nil { // Log query details if neo4j times out if util.IsNeoTimeoutError(err) { - timeoutLog := bhlog.WithLevel(bhlog.LevelError) - timeoutLog.Str("query", pQuery.StrippedQuery) - timeoutLog.Str("query cost", fmt.Sprintf("%d", pQuery.complexity.Weight)) - timeoutLog.Msg("Neo4j timed out 
while executing cypher query") + slog.Error("Neo4j timed out while executing cypher query", + "query", pQuery.StrippedQuery, + "query cost", fmt.Sprintf("%d", pQuery.complexity.Weight), + ) } else { slog.WarnContext(ctx, fmt.Sprintf("RawCypherQuery failed: %v", err)) } diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index 6bc35b2477..8e35335ff0 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -22,7 +22,7 @@ import ( "log/slog" "sort" - "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" @@ -91,7 +91,7 @@ func (s PostProcessingStats) Merge(other PostProcessingStats) { func (s PostProcessingStats) LogStats() { // Only output stats during debug runs - if bhlog.GlobalLevel() > bhlog.LevelDebug { + if level.GlobalAccepts(slog.LevelDebug) { return } diff --git a/packages/go/analysis/post_operation.go b/packages/go/analysis/post_operation.go index 73d5b59f80..dd022c18d1 100644 --- a/packages/go/analysis/post_operation.go +++ b/packages/go/analysis/post_operation.go @@ -24,7 +24,7 @@ import ( "sync/atomic" "time" - "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/bhlog/measure" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/dawgs/ops" @@ -132,7 +132,7 @@ func (s *AtomicPostProcessingStats) Merge(other *AtomicPostProcessingStats) { func (s *AtomicPostProcessingStats) LogStats() { // Only output stats during debug runs - if bhlog.GlobalLevel() > bhlog.LevelDebug { + if level.GlobalAccepts(slog.LevelDebug) { return } diff --git a/packages/go/bhlog/bhlog.go b/packages/go/bhlog/bhlog.go new file mode 100644 index 0000000000..de2bd7a67e --- /dev/null +++ b/packages/go/bhlog/bhlog.go @@ -0,0 +1,80 @@ +package bhlog + +import ( + "fmt" + "log" 
+ "log/slog" + "os" + "runtime" + "strings" + + "github.com/specterops/bloodhound/bhlog/handlers" + "github.com/specterops/bloodhound/bhlog/level" + "github.com/specterops/bloodhound/src/auth" +) + +func NewDefaultLogger() *slog.Logger { + return slog.New(&handlers.ContextHandler{ + IDResolver: auth.NewIdentityResolver(), + Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: level.GetLevelVar(), ReplaceAttr: handlers.ReplaceMessageKey}), + }) +} + +func NewLogLogger(origin string) *log.Logger { + return slog.NewLogLogger(&handlers.OriginHandler{ + Origin: origin, + Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: level.GetLevelVar()}), + }, slog.LevelError) +} + +type stackFrame struct { + File string `json:"file"` + Line int `json:"line"` + Func string `json:"func"` +} + +func GetCallStack() slog.Attr { + var outputFrames []stackFrame + + pc := make([]uintptr, 25) // Arbitrarily only go to a call depth of 25 + n := runtime.Callers(1, pc) + if n == 0 { + return slog.Attr{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + + for { + frame, more := frames.Next() + + outputFrames = append(outputFrames, stackFrame{File: frame.File, Line: frame.Line, Func: frame.Function}) + + if !more { + break + } + } + + return slog.Any("stack", outputFrames) +} + +var ( + levelErrorValue = slog.LevelError.String() + levelWarnValue = slog.LevelWarn.String() + levelInfoValue = slog.LevelInfo.String() + levelDebugValue = slog.LevelDebug.String() +) + +func ParseLevel(rawLevel string) (slog.Level, error) { + switch strings.ToUpper(rawLevel) { + case levelErrorValue: + return slog.LevelError, nil + case levelWarnValue: + return slog.LevelWarn, nil + case levelInfoValue: + return slog.LevelInfo, nil + case levelDebugValue: + return slog.LevelDebug, nil + default: + return 0, fmt.Errorf("unknown log level: %s", rawLevel) + } +} diff --git a/packages/go/bhlog/cmd/logtest/main.go b/packages/go/bhlog/cmd/logtest/main.go deleted file mode 100644 index 
7f072cd3a6..0000000000 --- a/packages/go/bhlog/cmd/logtest/main.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "fmt" - "log/slog" -) - -func main() { - slog.Info(fmt.Sprintf("This is an info log message: %s", "test")) - slog.Warn(fmt.Sprintf("This is a warning log message: %s", "test")) - slog.Error(fmt.Sprintf("This is a error log message: %s", "test")) -} diff --git a/packages/go/bhlog/config.go b/packages/go/bhlog/config.go deleted file mode 100644 index 1c1c630729..0000000000 --- a/packages/go/bhlog/config.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package bhlog - -import ( - "time" - - "github.com/rs/zerolog" -) - -const ( - // TimeFormatUnix defines a time format that makes time fields to be serialized as Unix timestamp integers. - TimeFormatUnix = "" - - // TimeFormatUnixMs defines a time format that makes time fields to be serialized as Unix timestamp integers in - // milliseconds. - TimeFormatUnixMs = "UNIXMS" - - // TimeFormatUnixMicro defines a time format that makes time fields to be serialized as Unix timestamp integers in - // microseconds. - TimeFormatUnixMicro = "UNIXMICRO" - - // TimeFormatUnixNano defines a time format that makes time fields to be serialized as Unix timestamp integers in - // nanoseconds. - TimeFormatUnixNano = "UNIXNANO" -) - -type Configuration struct { - // Level defines the global logging verbosity level. - Level Level - - // CallerSkipFrameCount is the number of stack frames to skip to find the caller. - CallerSkipFrameCount int - - // DurationFieldUnit defines the unit for time.Duration type fields added using the Dur function. - DurationFieldUnit time.Duration - - // TimeFieldFormat defines the time format of the Time field type. If set to TimeFormatUnix, TimeFormatUnixMs, - // TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX timestamp as integer. - TimeFieldFormat string -} - -// WithLevel returns this configuration with the specified log verbosity level. -func (s *Configuration) WithLevel(level Level) *Configuration { - s.Level = level - return s -} - -// WithCallerSkipFrameCount returns this configuration with the specified CallerSkipFrameCount. -func (s *Configuration) WithCallerSkipFrameCount(callerSkipFrameCount int) *Configuration { - s.CallerSkipFrameCount = callerSkipFrameCount - return s -} - -// WithDurationFieldUnit returns this configuration with the specified DurationFieldUnit. 
-func (s *Configuration) WithDurationFieldUnit(durationFieldUnit time.Duration) *Configuration { - s.DurationFieldUnit = durationFieldUnit - return s -} - -// WithTimeFieldFormat returns this configuration with the specified TimeFieldFormat. -func (s *Configuration) WithTimeFieldFormat(timeFieldFormat string) *Configuration { - s.TimeFieldFormat = timeFieldFormat - return s -} - -// DefaultConfiguration returns a set of default configuration values for logging. -func DefaultConfiguration() *Configuration { - return &Configuration{ - Level: LevelInfo, - CallerSkipFrameCount: 2, - DurationFieldUnit: time.Millisecond, - TimeFieldFormat: time.RFC3339Nano, - } -} - -// Configure applies the given configuration to the logging framework. -func Configure(config *Configuration) { - zerolog.TimeFieldFormat = config.TimeFieldFormat - zerolog.CallerSkipFrameCount = config.CallerSkipFrameCount - zerolog.DurationFieldUnit = config.DurationFieldUnit - - SetGlobalLevel(config.Level) -} - -// ConfigureDefaults is a helper function that configures the logging framework with a set of reasonable default -// settings. -func ConfigureDefaults() { - Configure(DefaultConfiguration()) -} diff --git a/packages/go/bhlog/event.go b/packages/go/bhlog/event.go deleted file mode 100644 index 7b86953386..0000000000 --- a/packages/go/bhlog/event.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package bhlog - -//go:generate go run go.uber.org/mock/mockgen -copyright_file=../../../LICENSE.header -destination=./mocks/event.go -package=mocks . Event - -import ( - "net" - "time" - - "github.com/rs/zerolog" -) - -// Event represents a log event. It is instanced by one of the level functions and finalized by the Msg or Msgf method. -// -// NOTICE: once finalized the Event should be disposed. Calling any function that performs finalization more than once -// can have unexpected results. -type Event interface { - // Enabled return false if the Event is going to be filtered out by - // log level or sampling. - Enabled() bool - - // Discard disables the event so Msg(f) won't print it. - Discard() Event - - // Msg sends the Event with msg added as the message field if not empty. - // - // NOTICE: once this method is called, the Event should be disposed. - // Calling Msg twice can have unexpected result. - Msg(msg string) - - // Send is equivalent to calling Msg(""). - // - // NOTICE: once this method is called, the Event should be disposed. - Send() - - // Msgf sends the event with formatted msg added as the message field if not empty. - // - // NOTICE: once this method is called, the Event should be disposed. - // Calling Msgf twice can have unexpected result. - Msgf(format string, v ...interface{}) - - // Str adds the field key with val as a string or []string to the Event context. - Str(key string, val ...string) Event - - // Bytes adds the field key with val as a string to the Event context. - // - // Runes outside of normal ASCII ranges will be hex-encoded in the resulting - // JSON. - Bytes(key string, val []byte) Event - - // Hex adds the field key with val as a hex string to the Event context. - Hex(key string, val []byte) Event - - // Fault adds the field "error" with serialized error or []error to the Event context. - // If err is nil, no field is added. 
- Fault(err ...error) Event - - // Stack enables stack trace printing for any errors passed to Fault(...). - Stack() Event - - // Bool adds the field key with val as a bool or []bool to the Event context. - Bool(key string, b ...bool) Event - - // Int adds the field key with i as an int or []int to the Event context. - Int(key string, i ...int) Event - - // Int8 adds the field key with i as an int8 or []int8 to the Event context. - Int8(key string, i ...int8) Event - - // Int16 adds the field key with i as an int16 or []int16 to the Event context. - Int16(key string, i ...int16) Event - - // Int32 adds the field key with i as an int32 or []int32 to the Event context. - Int32(key string, i ...int32) Event - - // Int64 adds the field key with i as an int64 or []int64 to the Event context. - Int64(key string, i ...int64) Event - - // Uint adds the field key with i as an uint or []uint to the Event context. - Uint(key string, i ...uint) Event - - // Uint8 adds the field key with i as an uint8 or []uint8 to the Event context. - Uint8(key string, i ...uint8) Event - - // Uint16 adds the field key with i as an uint16 or []uint16 to the Event context. - Uint16(key string, i ...uint16) Event - - // Uint32 adds the field key with i as an uint32 or []uint32 to the Event context. - Uint32(key string, i ...uint32) Event - - // Uint64 adds the field key with i as an uint64 or []uint64 to the Event context. - Uint64(key string, i ...uint64) Event - - // Float32 adds the field key with f as a float32 or []float32 to the Event context. - Float32(key string, f ...float32) Event - - // Float64 adds the field key with f as a float64 or []float64 to the Event context. - Float64(key string, f ...float64) Event - - // Timestamp adds the current local time as UNIX timestamp to the Event context with the "time" key. This is only - // useful when setting the time of an event to be submitted in the future. - // - // NOTE: This call won't dedupe the "time" key if the Event has one already. 
- Timestamp() Event - - // Time adds the field key with t formatted as a string or []string. - Time(key string, t ...time.Time) Event - - // Duration adds the field key as a duration or []duration. - Duration(key string, d ...time.Duration) Event - - // TimeDiff adds the field key with positive duration between time t and start. - // If time t is not greater than start, duration will be 0. - // - // Duration format follows the same principle as Dur(). - TimeDiff(key string, t time.Time, start time.Time) Event - - // Any adds the field key with the passed interface marshaled using reflection. - Any(key string, i any) Event - - // Caller adds the file:line of the caller. - // The argument skip is the number of stack frames to ascend. - Caller(skip ...int) Event - - // IPAddr adds IPv4 or IPv6 Address to the event. - IPAddr(key string, ip net.IP) Event - - // IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event. - IPPrefix(key string, pfx net.IPNet) Event - - // MACAddr adds MAC address to the event. 
- MACAddr(key string, ha net.HardwareAddr) Event -} - -type event struct { - event *zerolog.Event -} - -func (s *event) Str(key string, val ...string) Event { - if len(val) > 1 { - s.event = s.event.Strs(key, val) - } else { - s.event = s.event.Str(key, val[0]) - } - - return s -} - -func (s *event) Fault(err ...error) Event { - if len(err) > 1 { - s.event = s.event.Errs(zerolog.ErrorFieldName, err) - } else { - s.event = s.event.Err(err[0]) - } - - return s -} - -func (s *event) Bool(key string, b ...bool) Event { - if len(b) > 1 { - s.event = s.event.Bools(key, b) - } else { - s.event = s.event.Bool(key, b[0]) - } - - return s -} - -func (s *event) Int(key string, i ...int) Event { - if len(i) > 1 { - s.event = s.event.Ints(key, i) - } else { - s.event = s.event.Int(key, i[0]) - } - - return s -} - -func (s *event) Int8(key string, i ...int8) Event { - if len(i) > 1 { - s.event = s.event.Ints8(key, i) - } else { - s.event = s.event.Int8(key, i[0]) - } - - return s -} - -func (s *event) Int16(key string, i ...int16) Event { - if len(i) > 1 { - s.event = s.event.Ints16(key, i) - } else { - s.event = s.event.Int16(key, i[0]) - } - - return s -} - -func (s *event) Int32(key string, i ...int32) Event { - if len(i) > 1 { - s.event = s.event.Ints32(key, i) - } else { - s.event = s.event.Int32(key, i[0]) - } - - return s -} - -func (s *event) Int64(key string, i ...int64) Event { - if len(i) > 1 { - s.event = s.event.Ints64(key, i) - } else { - s.event = s.event.Int64(key, i[0]) - } - - return s -} - -func (s *event) Uint(key string, i ...uint) Event { - if len(i) > 1 { - s.event = s.event.Uints(key, i) - } else { - s.event = s.event.Uint(key, i[0]) - } - - return s -} - -func (s *event) Uint8(key string, i ...uint8) Event { - if len(i) > 1 { - s.event = s.event.Uints8(key, i) - } else { - s.event = s.event.Uint8(key, i[0]) - } - - return s -} - -func (s *event) Uint16(key string, i ...uint16) Event { - if len(i) > 1 { - s.event = s.event.Uints16(key, i) - } else { - 
s.event = s.event.Uint16(key, i[0]) - } - - return s -} - -func (s *event) Uint32(key string, i ...uint32) Event { - if len(i) > 1 { - s.event = s.event.Uints32(key, i) - } else { - s.event = s.event.Uint32(key, i[0]) - } - - return s -} - -func (s *event) Uint64(key string, i ...uint64) Event { - if len(i) > 1 { - s.event = s.event.Uints64(key, i) - } else { - s.event = s.event.Uint64(key, i[0]) - } - - return s -} - -func (s *event) Float32(key string, f ...float32) Event { - if len(f) > 1 { - s.event = s.event.Floats32(key, f) - } else { - s.event = s.event.Float32(key, f[0]) - } - - return s -} - -func (s *event) Float64(key string, f ...float64) Event { - if len(f) > 1 { - s.event = s.event.Floats64(key, f) - } else { - s.event = s.event.Float64(key, f[0]) - } - - return s -} - -func (s *event) Time(key string, t ...time.Time) Event { - if len(t) > 1 { - s.event = s.event.Times(key, t) - } else { - s.event = s.event.Time(key, t[0]) - } - - return s -} - -func (s *event) Duration(key string, d ...time.Duration) Event { - if len(d) > 1 { - s.event = s.event.Durs(key, d) - } else { - s.event = s.event.Dur(key, d[0]) - } - - return s -} - -func (s *event) Any(key string, i any) Event { - s.event = s.event.Interface(key, i) - return s -} - -func (s *event) Enabled() bool { - return s.event.Enabled() -} - -func (s *event) Discard() Event { - s.event = s.event.Discard() - return s -} - -func (s *event) Msg(msg string) { - s.event.Msg(msg) -} - -func (s *event) Send() { - s.event.Send() -} - -func (s *event) Msgf(format string, v ...interface{}) { - s.event.Msgf(format, v...) 
-} - -func (s *event) Bytes(key string, val []byte) Event { - s.event = s.event.Bytes(key, val) - return s -} - -func (s *event) Hex(key string, val []byte) Event { - s.event = s.event.Hex(key, val) - return s -} - -func (s *event) Stack() Event { - s.event = s.event.Stack() - return s -} - -func (s *event) Timestamp() Event { - s.event = s.event.Timestamp() - return s -} - -func (s *event) TimeDiff(key string, t time.Time, start time.Time) Event { - s.event = s.event.TimeDiff(key, t, start) - return s -} - -func (s *event) Caller(skip ...int) Event { - s.event = s.event.Caller(skip...) - return s -} - -func (s *event) IPAddr(key string, ip net.IP) Event { - s.event = s.event.IPAddr(key, ip) - return s -} - -func (s *event) IPPrefix(key string, pfx net.IPNet) Event { - s.event = s.event.IPPrefix(key, pfx) - return s -} - -func (s *event) MACAddr(key string, ha net.HardwareAddr) Event { - s.event = s.event.MACAddr(key, ha) - return s -} diff --git a/packages/go/bhlog/go.mod b/packages/go/bhlog/go.mod index 6682d3ecfe..5878b994a8 100644 --- a/packages/go/bhlog/go.mod +++ b/packages/go/bhlog/go.mod @@ -17,14 +17,3 @@ module github.com/specterops/bloodhound/bhlog go 1.23 - -require ( - github.com/rs/zerolog v1.29.1 - go.uber.org/mock v0.2.0 -) - -require ( - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect - golang.org/x/sys v0.28.0 // indirect -) diff --git a/packages/go/bhlog/go.sum b/packages/go/bhlog/go.sum index e1c34020d9..e69de29bb2 100644 --- a/packages/go/bhlog/go.sum +++ b/packages/go/bhlog/go.sum @@ -1,20 +0,0 @@ -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable 
v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= -github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= -go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= -go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= diff --git a/packages/go/bhlog/golog.go b/packages/go/bhlog/golog.go deleted file mode 100644 index 70128d4c73..0000000000 --- a/packages/go/bhlog/golog.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package bhlog - -import golog "log" - -// adapter is a simple struct that provides a golang stdlib interface to the BloodHound logging framework. -type adapter struct { - level Level -} - -func (s adapter) Write(msgBytes []byte) (n int, err error) { - WithLevel(s.level).Msg(string(msgBytes)) - return len(msgBytes), nil -} - -// Adapter creates a *golog.Logger instance that will correctly write out structured logs via the BloodHound logging -// framework. This tool is useful when adapting libraries that require the golang stdlib logging interface. 
-func Adapter(level Level, prefix string, flag int) *golog.Logger { - return golog.New(adapter{ - level: level, - }, prefix, flag) -} diff --git a/packages/go/bhlog/handlers/handlers.go b/packages/go/bhlog/handlers/handlers.go index 755fceb646..fcab5b456c 100644 --- a/packages/go/bhlog/handlers/handlers.go +++ b/packages/go/bhlog/handlers/handlers.go @@ -19,15 +19,11 @@ package handlers import ( "context" "log/slog" - "os" - "runtime" "github.com/specterops/bloodhound/src/auth" "github.com/specterops/bloodhound/src/ctx" ) -var lvl = new(slog.LevelVar) - type ContextHandler struct { IDResolver auth.IdentityResolver @@ -58,52 +54,21 @@ func (h ContextHandler) Handle(c context.Context, r slog.Record) error { return h.Handler.Handle(c, r) } -func ReplaceAttr(_ []string, a slog.Attr) slog.Attr { - if a.Key == slog.MessageKey { - a.Key = "message" - } - - return a -} - -func NewDefaultLogger() *slog.Logger { - return slog.New(&ContextHandler{IDResolver: auth.NewIdentityResolver(), Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: lvl, ReplaceAttr: ReplaceAttr})}) -} - -func SetGlobalLevel(level slog.Level) { - lvl.Set(level) +type OriginHandler struct { + Origin string + slog.Handler } -func GlobalLevel() slog.Level { - return lvl.Level() -} +func (h OriginHandler) Handle(c context.Context, r slog.Record) error { + r.Add("origin", h.Origin) -type stackFrame struct { - File string `json:"file"` - Line int `json:"line"` - Func string `json:"func"` + return h.Handler.Handle(c, r) } -func GetSlogCallStack() slog.Attr { - var outputFrames []stackFrame - - pc := make([]uintptr, 25) // Arbitrarily only go to a call depth of 25 - n := runtime.Callers(1, pc) - if n == 0 { - return slog.Attr{} - } - pc = pc[:n] - frames := runtime.CallersFrames(pc) - - for { - frame, more := frames.Next() - - outputFrames = append(outputFrames, stackFrame{File: frame.File, Line: frame.Line, Func: frame.Function}) - - if !more { - break - } +func ReplaceMessageKey(_ []string, a 
slog.Attr) slog.Attr { + if a.Key == slog.MessageKey { + a.Key = "message" } - return slog.Any("stack", outputFrames) + return a } diff --git a/packages/go/bhlog/level/level.go b/packages/go/bhlog/level/level.go new file mode 100644 index 0000000000..c4cea41286 --- /dev/null +++ b/packages/go/bhlog/level/level.go @@ -0,0 +1,21 @@ +package level + +import "log/slog" + +var lvl = new(slog.LevelVar) + +func GetLevelVar() *slog.LevelVar { + return lvl +} + +func SetGlobalLevel(level slog.Level) { + lvl.Set(level) +} + +func GlobalAccepts(level slog.Level) bool { + return lvl.Level() <= level +} + +func GlobalLevel() slog.Level { + return lvl.Level() +} diff --git a/packages/go/bhlog/log.go b/packages/go/bhlog/log.go deleted file mode 100644 index 3914748561..0000000000 --- a/packages/go/bhlog/log.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package bhlog - -import ( - "fmt" - "strings" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -// Level is a type alias that represents a log verbosity level. -type Level = zerolog.Level - -const ( - // LevelPanic is the least verbose log level. This should be reserved for any logging statements that MUST be - // written regardless of the configured global or event logging level. 
- LevelPanic = zerolog.PanicLevel - - // LevelFatal is on of the lease verbose log levels reserved for error messages that would result in the - // application becoming non-functional. - LevelFatal = zerolog.FatalLevel - - // LevelError is a log verbosity level reserved for error reporting. - LevelError = zerolog.ErrorLevel - - // LevelWarn is a log verbosity level reserved for warnings that are non-critical but otherwise noteworthy to - // consumers of log data. - LevelWarn = zerolog.WarnLevel - - // LevelInfo is a log verbosity level reserved for information that is non-critical but otherwise noteworthy to - // consumers of log data. - LevelInfo = zerolog.InfoLevel - - // LevelDebug is a log verbosity level reserved for nosier information that is non-critical but essential to - // debugging application workflows. - LevelDebug = zerolog.DebugLevel - - // LevelTrace is the most verbose log level. This should be reserved for any logging statements that would - // otherwise flood the application log with data of questionable value. - LevelTrace = zerolog.TraceLevel - - // LevelNone defines an absent log verbosity level. - LevelNone = zerolog.NoLevel - - // LevelDisabled defines a log verbosity level that disables the sending of logging events. - LevelDisabled = zerolog.Disabled - - FieldElapsed = "elapsed" - FieldMeasurementID = "measurement_id" -) - -// The values below are configured in this manner to avoid having to rely on default values that may change throughout -// the development lifecycle of zerolog. While inefficient this is only done once at time of import of the log module. 
-var ( - levelPanicValue = strings.ToLower(zerolog.PanicLevel.String()) - levelFatalValue = strings.ToLower(zerolog.FatalLevel.String()) - levelErrorValue = strings.ToLower(zerolog.ErrorLevel.String()) - levelWarnValue = strings.ToLower(zerolog.WarnLevel.String()) - levelInfoValue = strings.ToLower(zerolog.InfoLevel.String()) - levelDebugValue = strings.ToLower(zerolog.DebugLevel.String()) - levelTraceValue = strings.ToLower(zerolog.TraceLevel.String()) - levelNoneValue = strings.ToLower(zerolog.NoLevel.String()) - levelDisabledValue = strings.ToLower(zerolog.Disabled.String()) -) - -// ParseLevel takes a string value and attempts to match it to the string representation of one of the log verbosity -// levels available. This defaults to LevelNone along with an error if a match can not be found. -func ParseLevel(rawLevel string) (Level, error) { - switch strings.ToLower(rawLevel) { - case levelPanicValue: - return LevelPanic, nil - - case levelFatalValue: - return LevelFatal, nil - - case levelErrorValue: - return LevelError, nil - - case levelWarnValue: - return LevelWarn, nil - - case levelInfoValue: - return LevelInfo, nil - - case levelDebugValue: - return LevelDebug, nil - - case levelTraceValue: - return LevelTrace, nil - - case levelNoneValue: - return LevelNone, nil - - case levelDisabledValue: - return LevelDisabled, nil - - default: - return LevelNone, fmt.Errorf("unknown log level: %s", rawLevel) - } -} - -// SetGlobalLevel sets the global log verbosity level. -func SetGlobalLevel(level Level) { - zerolog.SetGlobalLevel(level) -} - -// GlobalAccepts returns true if the given log verbosity level would be emitted based on the current global logging -// level. -func GlobalAccepts(level Level) bool { - return GlobalLevel() <= level -} - -// GlobalLevel returns the current global log verbosity level. -func GlobalLevel() Level { - return zerolog.GlobalLevel() -} - -// WithLevel returns a logging event with the given log verbosity level. 
-func WithLevel(level Level) Event { - return &event{ - event: log.WithLevel(level), - } -} diff --git a/packages/go/bhlog/mocks/event.go b/packages/go/bhlog/mocks/event.go deleted file mode 100644 index c11c43893c..0000000000 --- a/packages/go/bhlog/mocks/event.go +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/specterops/bloodhound/log (interfaces: Event) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - net "net" - reflect "reflect" - time "time" - - "github.com/specterops/bloodhound/bhlog" - gomock "go.uber.org/mock/gomock" -) - -// MockEvent is a mock of Event interface. -type MockEvent struct { - ctrl *gomock.Controller - recorder *MockEventMockRecorder -} - -// MockEventMockRecorder is the mock recorder for MockEvent. -type MockEventMockRecorder struct { - mock *MockEvent -} - -// NewMockEvent creates a new mock instance. -func NewMockEvent(ctrl *gomock.Controller) *MockEvent { - mock := &MockEvent{ctrl: ctrl} - mock.recorder = &MockEventMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockEvent) EXPECT() *MockEventMockRecorder { - return m.recorder -} - -// Any mocks base method. 
-func (m *MockEvent) Any(arg0 string, arg1 interface{}) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Any", arg0, arg1) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Any indicates an expected call of Any. -func (mr *MockEventMockRecorder) Any(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Any", reflect.TypeOf((*MockEvent)(nil).Any), arg0, arg1) -} - -// Bool mocks base method. -func (m *MockEvent) Bool(arg0 string, arg1 ...bool) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Bool", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Bool indicates an expected call of Bool. -func (mr *MockEventMockRecorder) Bool(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bool", reflect.TypeOf((*MockEvent)(nil).Bool), varargs...) -} - -// Bytes mocks base method. -func (m *MockEvent) Bytes(arg0 string, arg1 []byte) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Bytes", arg0, arg1) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Bytes indicates an expected call of Bytes. -func (mr *MockEventMockRecorder) Bytes(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockEvent)(nil).Bytes), arg0, arg1) -} - -// Caller mocks base method. -func (m *MockEvent) Caller(arg0 ...int) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range arg0 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Caller", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Caller indicates an expected call of Caller. 
-func (mr *MockEventMockRecorder) Caller(arg0 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Caller", reflect.TypeOf((*MockEvent)(nil).Caller), arg0...) -} - -// Discard mocks base method. -func (m *MockEvent) Discard() bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Discard") - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Discard indicates an expected call of Discard. -func (mr *MockEventMockRecorder) Discard() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discard", reflect.TypeOf((*MockEvent)(nil).Discard)) -} - -// Duration mocks base method. -func (m *MockEvent) Duration(arg0 string, arg1 ...time.Duration) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Duration", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Duration indicates an expected call of Duration. -func (mr *MockEventMockRecorder) Duration(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Duration", reflect.TypeOf((*MockEvent)(nil).Duration), varargs...) -} - -// Enabled mocks base method. -func (m *MockEvent) Enabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Enabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// Enabled indicates an expected call of Enabled. -func (mr *MockEventMockRecorder) Enabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockEvent)(nil).Enabled)) -} - -// Fault mocks base method. 
-func (m *MockEvent) Fault(arg0 ...error) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range arg0 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Fault", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Fault indicates an expected call of Fault. -func (mr *MockEventMockRecorder) Fault(arg0 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fault", reflect.TypeOf((*MockEvent)(nil).Fault), arg0...) -} - -// Float32 mocks base method. -func (m *MockEvent) Float32(arg0 string, arg1 ...float32) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Float32", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Float32 indicates an expected call of Float32. -func (mr *MockEventMockRecorder) Float32(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Float32", reflect.TypeOf((*MockEvent)(nil).Float32), varargs...) -} - -// Float64 mocks base method. -func (m *MockEvent) Float64(arg0 string, arg1 ...float64) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Float64", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Float64 indicates an expected call of Float64. -func (mr *MockEventMockRecorder) Float64(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Float64", reflect.TypeOf((*MockEvent)(nil).Float64), varargs...) -} - -// Hex mocks base method. 
-func (m *MockEvent) Hex(arg0 string, arg1 []byte) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Hex", arg0, arg1) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Hex indicates an expected call of Hex. -func (mr *MockEventMockRecorder) Hex(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hex", reflect.TypeOf((*MockEvent)(nil).Hex), arg0, arg1) -} - -// IPAddr mocks base method. -func (m *MockEvent) IPAddr(arg0 string, arg1 net.IP) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IPAddr", arg0, arg1) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// IPAddr indicates an expected call of IPAddr. -func (mr *MockEventMockRecorder) IPAddr(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IPAddr", reflect.TypeOf((*MockEvent)(nil).IPAddr), arg0, arg1) -} - -// IPPrefix mocks base method. -func (m *MockEvent) IPPrefix(arg0 string, arg1 net.IPNet) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IPPrefix", arg0, arg1) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// IPPrefix indicates an expected call of IPPrefix. -func (mr *MockEventMockRecorder) IPPrefix(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IPPrefix", reflect.TypeOf((*MockEvent)(nil).IPPrefix), arg0, arg1) -} - -// Int mocks base method. -func (m *MockEvent) Int(arg0 string, arg1 ...int) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Int", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Int indicates an expected call of Int. -func (mr *MockEventMockRecorder) Int(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int", reflect.TypeOf((*MockEvent)(nil).Int), varargs...) -} - -// Int16 mocks base method. -func (m *MockEvent) Int16(arg0 string, arg1 ...int16) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Int16", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Int16 indicates an expected call of Int16. -func (mr *MockEventMockRecorder) Int16(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int16", reflect.TypeOf((*MockEvent)(nil).Int16), varargs...) -} - -// Int32 mocks base method. -func (m *MockEvent) Int32(arg0 string, arg1 ...int32) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Int32", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Int32 indicates an expected call of Int32. -func (mr *MockEventMockRecorder) Int32(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int32", reflect.TypeOf((*MockEvent)(nil).Int32), varargs...) -} - -// Int64 mocks base method. -func (m *MockEvent) Int64(arg0 string, arg1 ...int64) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Int64", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Int64 indicates an expected call of Int64. -func (mr *MockEventMockRecorder) Int64(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int64", reflect.TypeOf((*MockEvent)(nil).Int64), varargs...) -} - -// Int8 mocks base method. -func (m *MockEvent) Int8(arg0 string, arg1 ...int8) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Int8", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Int8 indicates an expected call of Int8. -func (mr *MockEventMockRecorder) Int8(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int8", reflect.TypeOf((*MockEvent)(nil).Int8), varargs...) -} - -// MACAddr mocks base method. -func (m *MockEvent) MACAddr(arg0 string, arg1 net.HardwareAddr) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MACAddr", arg0, arg1) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// MACAddr indicates an expected call of MACAddr. -func (mr *MockEventMockRecorder) MACAddr(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MACAddr", reflect.TypeOf((*MockEvent)(nil).MACAddr), arg0, arg1) -} - -// Msg mocks base method. -func (m *MockEvent) Msg(arg0 string) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Msg", arg0) -} - -// Msg indicates an expected call of Msg. -func (mr *MockEventMockRecorder) Msg(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Msg", reflect.TypeOf((*MockEvent)(nil).Msg), arg0) -} - -// Msgf mocks base method. -func (m *MockEvent) Msgf(arg0 string, arg1 ...interface{}) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Msgf", varargs...) -} - -// Msgf indicates an expected call of Msgf. 
-func (mr *MockEventMockRecorder) Msgf(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Msgf", reflect.TypeOf((*MockEvent)(nil).Msgf), varargs...) -} - -// Send mocks base method. -func (m *MockEvent) Send() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Send") -} - -// Send indicates an expected call of Send. -func (mr *MockEventMockRecorder) Send() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockEvent)(nil).Send)) -} - -// Stack mocks base method. -func (m *MockEvent) Stack() bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stack") - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Stack indicates an expected call of Stack. -func (mr *MockEventMockRecorder) Stack() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stack", reflect.TypeOf((*MockEvent)(nil).Stack)) -} - -// Str mocks base method. -func (m *MockEvent) Str(arg0 string, arg1 ...string) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Str", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Str indicates an expected call of Str. -func (mr *MockEventMockRecorder) Str(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Str", reflect.TypeOf((*MockEvent)(nil).Str), varargs...) -} - -// Time mocks base method. -func (m *MockEvent) Time(arg0 string, arg1 ...time.Time) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Time", varargs...) 
- ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Time indicates an expected call of Time. -func (mr *MockEventMockRecorder) Time(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Time", reflect.TypeOf((*MockEvent)(nil).Time), varargs...) -} - -// TimeDiff mocks base method. -func (m *MockEvent) TimeDiff(arg0 string, arg1, arg2 time.Time) bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TimeDiff", arg0, arg1, arg2) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// TimeDiff indicates an expected call of TimeDiff. -func (mr *MockEventMockRecorder) TimeDiff(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeDiff", reflect.TypeOf((*MockEvent)(nil).TimeDiff), arg0, arg1, arg2) -} - -// Timestamp mocks base method. -func (m *MockEvent) Timestamp() bhlog.Event { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Timestamp") - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Timestamp indicates an expected call of Timestamp. -func (mr *MockEventMockRecorder) Timestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockEvent)(nil).Timestamp)) -} - -// Uint mocks base method. -func (m *MockEvent) Uint(arg0 string, arg1 ...uint) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Uint", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Uint indicates an expected call of Uint. -func (mr *MockEventMockRecorder) Uint(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uint", reflect.TypeOf((*MockEvent)(nil).Uint), varargs...) -} - -// Uint16 mocks base method. -func (m *MockEvent) Uint16(arg0 string, arg1 ...uint16) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Uint16", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Uint16 indicates an expected call of Uint16. -func (mr *MockEventMockRecorder) Uint16(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uint16", reflect.TypeOf((*MockEvent)(nil).Uint16), varargs...) -} - -// Uint32 mocks base method. -func (m *MockEvent) Uint32(arg0 string, arg1 ...uint32) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Uint32", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Uint32 indicates an expected call of Uint32. -func (mr *MockEventMockRecorder) Uint32(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uint32", reflect.TypeOf((*MockEvent)(nil).Uint32), varargs...) -} - -// Uint64 mocks base method. -func (m *MockEvent) Uint64(arg0 string, arg1 ...uint64) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Uint64", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Uint64 indicates an expected call of Uint64. -func (mr *MockEventMockRecorder) Uint64(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uint64", reflect.TypeOf((*MockEvent)(nil).Uint64), varargs...) -} - -// Uint8 mocks base method. -func (m *MockEvent) Uint8(arg0 string, arg1 ...byte) bhlog.Event { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Uint8", varargs...) - ret0, _ := ret[0].(bhlog.Event) - return ret0 -} - -// Uint8 indicates an expected call of Uint8. -func (mr *MockEventMockRecorder) Uint8(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uint8", reflect.TypeOf((*MockEvent)(nil).Uint8), varargs...) -} diff --git a/packages/go/dawgs/go.mod b/packages/go/dawgs/go.mod index fef1916000..f48efaf34b 100644 --- a/packages/go/dawgs/go.mod +++ b/packages/go/dawgs/go.mod @@ -25,8 +25,8 @@ require ( github.com/jackc/pgtype v1.14.4 github.com/jackc/pgx/v5 v5.7.1 github.com/neo4j/neo4j-go-driver/v5 v5.9.0 - github.com/specterops/bloodhound/cypher v0.0.0-00010101000000-000000000000 github.com/specterops/bloodhound/bhlog v0.0.0-00010101000000-000000000000 + github.com/specterops/bloodhound/cypher v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.2.0 ) @@ -41,21 +41,17 @@ require ( github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/kr/pretty v0.3.1 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect github.com/mschoch/smat v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect - github.com/rs/zerolog v1.29.1 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect golang.org/x/sync v0.10.0 // indirect - 
golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace ( - github.com/specterops/bloodhound/cypher => ../cypher github.com/specterops/bloodhound/bhlog => ../bhlog + github.com/specterops/bloodhound/cypher => ../cypher ) diff --git a/packages/go/dawgs/go.sum b/packages/go/dawgs/go.sum index ce13f5e21b..7b0f2c6d4b 100644 --- a/packages/go/dawgs/go.sum +++ b/packages/go/dawgs/go.sum @@ -14,7 +14,6 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -27,7 +26,6 @@ github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= @@ -102,32 +100,21 @@ github.com/lib/pq v1.10.2/go.mod 
h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/neo4j/neo4j-go-driver/v5 v5.9.0 h1:TYxT0RSiwnvVFia90V7TLnRXv8HkdQQ6rTUaPVoyZ+w= github.com/neo4j/neo4j-go-driver/v5 v5.9.0/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= -github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -211,17 +198,11 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/packages/go/stbernard/cmdrunner/cmdrunner.go b/packages/go/stbernard/cmdrunner/cmdrunner.go index 2faf2a3bb4..10df3949d6 100644 --- a/packages/go/stbernard/cmdrunner/cmdrunner.go +++ b/packages/go/stbernard/cmdrunner/cmdrunner.go @@ -19,12 +19,12 @@ package cmdrunner import ( "errors" "fmt" - "github.com/specterops/bloodhound/bhlog" "log/slog" "os" "os/exec" "strings" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/packages/go/stbernard/environment" ) @@ -43,7 +43,7 @@ func Run(command string, args []string, path string, env environment.Environment cmdstr = command + " " + args[0] cmd = exec.Command(command, args...) 
- debugEnabled = bhlog.GlobalAccepts(bhlog.LevelDebug) + debugEnabled = level.GlobalAccepts(slog.LevelDebug) ) cmd.Env = env.Slice() diff --git a/packages/go/stbernard/command/command.go b/packages/go/stbernard/command/command.go index 285400e0b6..751fa78c65 100644 --- a/packages/go/stbernard/command/command.go +++ b/packages/go/stbernard/command/command.go @@ -20,10 +20,11 @@ import ( "errors" "flag" "fmt" + "log/slog" "os" "strings" - "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/packages/go/stbernard/command/analysis" "github.com/specterops/bloodhound/packages/go/stbernard/command/builder" "github.com/specterops/bloodhound/packages/go/stbernard/command/cover" @@ -139,11 +140,11 @@ func ParseCLI(env environment.Environment) (CommandRunner, error) { } if *verboseEnabled { - bhlog.SetGlobalLevel(bhlog.LevelInfo) + level.SetGlobalLevel(slog.LevelInfo) } if *debugEnabled { - bhlog.SetGlobalLevel(bhlog.LevelDebug) + level.SetGlobalLevel(slog.LevelDebug) } return currentCmd, currentCmd.Parse(cmdStartIdx) diff --git a/packages/go/stbernard/git/git.go b/packages/go/stbernard/git/git.go index 61556cb1e2..0396867492 100644 --- a/packages/go/stbernard/git/git.go +++ b/packages/go/stbernard/git/git.go @@ -20,6 +20,7 @@ import ( "bytes" "errors" "fmt" + "github.com/specterops/bloodhound/bhlog/level" "log/slog" "os" "os/exec" @@ -28,7 +29,6 @@ import ( "strings" "github.com/Masterminds/semver/v3" - "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" "github.com/specterops/bloodhound/packages/go/stbernard/environment" ) @@ -77,7 +77,7 @@ func CheckClean(cwd string, env environment.Environment) (bool, error) { cmd.Env = env.Slice() cmd.Dir = cwd - if bhlog.GlobalAccepts(bhlog.LevelDebug) { + if level.GlobalAccepts(slog.LevelDebug) { cmd.Stderr = os.Stderr } @@ -168,7 +168,7 @@ func getAllVersionTags(cwd string, env environment.Environment) 
([]string, error cmd.Dir = cwd cmd.Stdout = &output - if bhlog.GlobalAccepts(bhlog.LevelDebug) { + if level.GlobalAccepts(slog.LevelDebug) { cmd.Stderr = os.Stderr } diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 1feff87a6e..31b4869e5e 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -25,6 +25,7 @@ import ( "os" "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/packages/go/stbernard/command" "github.com/specterops/bloodhound/packages/go/stbernard/environment" ) @@ -33,7 +34,8 @@ func main() { env := environment.NewEnvironment() var rawLvl = env[environment.LogLevelVarName] - bhlog.ConfigureDefaults() + logger := bhlog.NewDefaultLogger() + slog.SetDefault(logger) if rawLvl == "" { rawLvl = "warn" @@ -42,7 +44,7 @@ func main() { if lvl, err := bhlog.ParseLevel(rawLvl); err != nil { slog.Error(fmt.Sprintf("Could not parse log level from %s: %v", environment.LogLevelVarName, err)) } else { - bhlog.SetGlobalLevel(lvl) + level.GlobalAccepts(lvl) } if cmd, err := command.ParseCLI(env); errors.Is(err, command.ErrNoCmd) { From 7eadb71cdfbe48b6896e8d176fcf114b86cfd37b Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 22:47:28 -0500 Subject: [PATCH 16/20] BED-4153: Resolve issues from migration --- cmd/api/src/api/middleware/logging.go | 2 +- cmd/api/src/api/v2/auth/auth_test.go | 4 +++- cmd/api/src/cmd/bhapi/main.go | 4 ++-- cmd/api/src/cmd/dawgs-harness/main.go | 13 +++++++------ cmd/api/src/daemons/datapipe/jobs.go | 2 +- cmd/api/src/database/log_test.go | 4 ++-- cmd/api/src/queries/graph.go | 4 ++-- packages/go/analysis/ad/esc3.go | 2 +- packages/go/analysis/ad/ntlm.go | 3 ++- packages/go/bhlog/bhlog.go | 16 ++++++++++++++++ packages/go/bhlog/level/level.go | 16 ++++++++++++++++ packages/go/ein/ad.go | 2 +- packages/go/schemagen/main.go | 5 ++++- packages/go/stbernard/git/git.go | 2 
+- 14 files changed, 59 insertions(+), 20 deletions(-) diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index ce1ea71dbf..cc2ccc51aa 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -150,7 +150,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http if !deadline.IsZero() && time.Now().After(deadline) { slog.WarnContext( request.Context(), - fmt.Sprintf("%s %s took longer than the configured timeout of %d seconds", request.Method, request.URL.RequestURI(), timeout.Seconds()), + fmt.Sprintf("%s %s took longer than the configured timeout of %0.f seconds", request.Method, request.URL.RequestURI(), timeout.Seconds()), ) } }() diff --git a/cmd/api/src/api/v2/auth/auth_test.go b/cmd/api/src/api/v2/auth/auth_test.go index 1e4f87cf41..928c0edad0 100644 --- a/cmd/api/src/api/v2/auth/auth_test.go +++ b/cmd/api/src/api/v2/auth/auth_test.go @@ -22,6 +22,7 @@ import ( "database/sql" "encoding/json" "fmt" + "log/slog" "net/http" "net/http/httptest" "net/url" @@ -1226,7 +1227,8 @@ func TestCreateUser_ResetPassword(t *testing.T) { goodUserMap, } - bhlog.ConfigureDefaults() + logger := bhlog.NewDefaultLogger() + slog.SetDefault(logger) ctx := context.WithValue(context.Background(), ctx.ValueKey, &ctx.Context{}) payload, err := json.Marshal(input.Body) diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index 00c08517dd..2ad5f01449 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -23,7 +23,7 @@ import ( "log/slog" "os" - "github.com/specterops/bloodhound/bhlog/handlers" + "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/src/bootstrap" "github.com/specterops/bloodhound/src/config" @@ -58,7 +58,7 @@ func main() { printVersion() } - logger := handlers.NewDefaultLogger() + logger := bhlog.NewDefaultLogger() slog.SetDefault(logger) if cfg, err 
:= config.GetConfiguration(configFilePath, config.NewDefaultConfiguration); err != nil { diff --git a/cmd/api/src/cmd/dawgs-harness/main.go b/cmd/api/src/cmd/dawgs-harness/main.go index df1c0abe97..8f2a48ba99 100644 --- a/cmd/api/src/cmd/dawgs-harness/main.go +++ b/cmd/api/src/cmd/dawgs-harness/main.go @@ -20,6 +20,7 @@ import ( "context" "flag" "fmt" + "log/slog" _ "net/http/pprof" "os" "os/signal" @@ -27,15 +28,14 @@ import ( "syscall" "time" - "github.com/specterops/bloodhound/dawgs/drivers/neo4j" - "github.com/specterops/bloodhound/dawgs/drivers/pg" - "github.com/specterops/bloodhound/dawgs/util/size" - schema "github.com/specterops/bloodhound/graphschema" - "github.com/jedib0t/go-pretty/v6/table" "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs" + "github.com/specterops/bloodhound/dawgs/drivers/neo4j" + "github.com/specterops/bloodhound/dawgs/drivers/pg" "github.com/specterops/bloodhound/dawgs/graph" + "github.com/specterops/bloodhound/dawgs/util/size" + schema "github.com/specterops/bloodhound/graphschema" "github.com/specterops/bloodhound/src/cmd/dawgs-harness/tests" ) @@ -126,7 +126,8 @@ func main() { flag.StringVar(&pgConnectionStr, "pg", "user=bhe dbname=bhe password=bhe4eva host=localhost", "PostgreSQL connection string.") flag.Parse() - bhlog.ConfigureDefaults() + logger := bhlog.NewDefaultLogger() + slog.SetDefault(logger) switch testType { case "both": diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index 49cbff5f3b..54affb8ca6 100644 --- a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -263,7 +263,7 @@ func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.Inges job.TotalFiles = total job.FailedFiles += failed if err = s.db.UpdateFileUploadJob(ctx, job); err != nil { - slog.ErrorContext(ctx, fmt.Sprintf("Failed to update number of failed files for file upload job ID %s: %v", job.ID, err)) + slog.ErrorContext(ctx, 
fmt.Sprintf("Failed to update number of failed files for file upload job ID %d: %v", job.ID, err)) } } diff --git a/cmd/api/src/database/log_test.go b/cmd/api/src/database/log_test.go index ea2c99c1ca..ac58685d78 100644 --- a/cmd/api/src/database/log_test.go +++ b/cmd/api/src/database/log_test.go @@ -19,12 +19,12 @@ package database_test import ( "bytes" "fmt" - "github.com/specterops/bloodhound/bhlog/handlers" "log/slog" "strings" "testing" "time" + "github.com/specterops/bloodhound/bhlog/handlers" "github.com/specterops/bloodhound/src/database" ) @@ -37,7 +37,7 @@ func TestGormLogAdapter_Info(t *testing.T) { ) var buf bytes.Buffer - slog.SetDefault(slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceAttr}))) + slog.SetDefault(slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceMessageKey}))) expected := fmt.Sprintf(`message="message %d %s %f"`, 1, "arg", 2.0) gormLogAdapter.Info(nil, "message %d %s %f", 1, "arg", 2.0) diff --git a/cmd/api/src/queries/graph.go b/cmd/api/src/queries/graph.go index 9bc5cb0703..62fad584a5 100644 --- a/cmd/api/src/queries/graph.go +++ b/cmd/api/src/queries/graph.go @@ -444,13 +444,13 @@ func (s *GraphQuery) RawCypherQuery(ctx context.Context, pQuery PreparedQuery, i ) if bhCtxInst.Timeout > maxTimeout { - slog.DebugContext(ctx, fmt.Sprintf("Custom timeout is too large, using the maximum allowable timeout of %d minutes instead", maxTimeout.Minutes())) + slog.DebugContext(ctx, fmt.Sprintf("Custom timeout is too large, using the maximum allowable timeout of %0.f minutes instead", maxTimeout.Minutes())) bhCtxInst.Timeout = maxTimeout } availableRuntime := bhCtxInst.Timeout if availableRuntime > 0 { - slog.DebugContext(ctx, fmt.Sprintf("Available timeout for query is set to: %d seconds", availableRuntime.Seconds())) + slog.DebugContext(ctx, fmt.Sprintf("Available timeout for query is set to: %0.f seconds", availableRuntime.Seconds())) } else { availableRuntime = 
defaultTimeout if !s.DisableCypherComplexityLimit { diff --git a/packages/go/analysis/ad/esc3.go b/packages/go/analysis/ad/esc3.go index 35f541d5e4..ea03be0764 100644 --- a/packages/go/analysis/ad/esc3.go +++ b/packages/go/analysis/ad/esc3.go @@ -155,7 +155,7 @@ func PostEnrollOnBehalfOf(domains, enterpriseCertAuthorities, certTemplates []*g } else if version >= 2 { versionTwoTemplates = append(versionTwoTemplates, node) } else { - slog.Warn(fmt.Sprintf("Got cert template %d with an invalid version %d", node.ID, version)) + slog.Warn(fmt.Sprintf("Got cert template %d with an invalid version %0.f", node.ID, version)) } } diff --git a/packages/go/analysis/ad/ntlm.go b/packages/go/analysis/ad/ntlm.go index c780ed509d..d505202cc6 100644 --- a/packages/go/analysis/ad/ntlm.go +++ b/packages/go/analysis/ad/ntlm.go @@ -19,6 +19,7 @@ package ad import ( "context" "errors" + "fmt" "log/slog" "github.com/specterops/bloodhound/analysis" @@ -54,7 +55,7 @@ func PostNTLM(ctx context.Context, db graph.Database, groupExpansions impact.Pat } else if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { return PostCoerceAndRelayNTLMToSMB(tx, outC, groupExpansions, innerComputer, authenticatedUserID) }); err != nil { - slog.WarnContext(ctx, "Post processing failed for %s: %v", ad.CoerceAndRelayNTLMToSMB, err) + slog.WarnContext(ctx, fmt.Sprintf("Post processing failed for %s: %v", ad.CoerceAndRelayNTLMToSMB, err)) // Additional analysis may occur if one of our analysis errors continue } diff --git a/packages/go/bhlog/bhlog.go b/packages/go/bhlog/bhlog.go index de2bd7a67e..c996c2a64e 100644 --- a/packages/go/bhlog/bhlog.go +++ b/packages/go/bhlog/bhlog.go @@ -1,3 +1,19 @@ +// Copyright 2025 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + package bhlog import ( diff --git a/packages/go/bhlog/level/level.go b/packages/go/bhlog/level/level.go index c4cea41286..44a7f9e9ea 100644 --- a/packages/go/bhlog/level/level.go +++ b/packages/go/bhlog/level/level.go @@ -1,3 +1,19 @@ +// Copyright 2025 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + package level import "log/slog" diff --git a/packages/go/ein/ad.go b/packages/go/ein/ad.go index 733c9ab06e..b3723cdbd7 100644 --- a/packages/go/ein/ad.go +++ b/packages/go/ein/ad.go @@ -78,7 +78,7 @@ func stringToBool(itemProps map[string]any, keyName string) { case bool: //pass default: - slog.Debug(fmt.Sprintf("Removing %s with type %T", converted)) + slog.Debug(fmt.Sprintf("Removing %s with type %T", converted, converted)) delete(itemProps, keyName) } } diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go index db38add244..4aa14848ea 100644 --- a/packages/go/schemagen/main.go +++ b/packages/go/schemagen/main.go @@ -24,6 +24,7 @@ import ( "cuelang.org/go/cue/errors" "github.com/specterops/bloodhound/bhlog" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/schemagen/generator" "github.com/specterops/bloodhound/schemagen/model" "github.com/specterops/bloodhound/schemagen/tsgen" @@ -70,7 +71,9 @@ func GenerateSharedTypeScript(projectRoot string, rootSchema Schema) error { } func main() { - bhlog.Configure(bhlog.DefaultConfiguration().WithLevel(bhlog.LevelDebug)) + logger := bhlog.NewDefaultLogger() + slog.SetDefault(logger) + level.SetGlobalLevel(slog.LevelDebug) cfgBuilder := generator.NewConfigBuilder("/schemas") diff --git a/packages/go/stbernard/git/git.go b/packages/go/stbernard/git/git.go index 0396867492..47bda9d58f 100644 --- a/packages/go/stbernard/git/git.go +++ b/packages/go/stbernard/git/git.go @@ -20,7 +20,6 @@ import ( "bytes" "errors" "fmt" - "github.com/specterops/bloodhound/bhlog/level" "log/slog" "os" "os/exec" @@ -29,6 +28,7 @@ import ( "strings" "github.com/Masterminds/semver/v3" + "github.com/specterops/bloodhound/bhlog/level" "github.com/specterops/bloodhound/packages/go/stbernard/cmdrunner" "github.com/specterops/bloodhound/packages/go/stbernard/environment" ) From e29cb99d3859ab09f7b46ba8377a79640c6af3bf Mon Sep 17 00:00:00 2001 From: Wes 
<169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 23:29:55 -0500 Subject: [PATCH 17/20] BED-4153: Resolve lint errors --- cmd/api/src/api/auth.go | 2 +- cmd/api/src/api/middleware/logging.go | 8 ++++---- cmd/api/src/api/middleware/middleware.go | 2 +- cmd/api/src/api/tools/pg.go | 4 ++-- cmd/api/src/api/v2/agi.go | 2 +- cmd/api/src/api/v2/analysisrequest.go | 3 +-- cmd/api/src/api/v2/auth/oidc.go | 2 +- cmd/api/src/api/v2/database_wipe.go | 2 +- cmd/api/src/api/v2/flag.go | 2 +- cmd/api/src/bootstrap/server.go | 6 +++--- cmd/api/src/daemons/datapipe/jobs.go | 2 +- cmd/api/src/database/log_test.go | 3 ++- cmd/api/src/model/appcfg/parameter.go | 14 +++++++------- cmd/api/src/services/dataquality/dataquality.go | 2 +- cmd/api/src/services/entrypoint.go | 2 +- packages/go/analysis/ad/ad.go | 2 +- packages/go/analysis/ad/post.go | 2 +- packages/go/analysis/post.go | 4 ++-- packages/go/analysis/post_operation.go | 4 ++-- packages/go/conftool/main.go | 5 ++--- packages/go/stbernard/command/tester/tester.go | 2 +- packages/go/stbernard/main.go | 2 +- 22 files changed, 38 insertions(+), 39 deletions(-) diff --git a/cmd/api/src/api/auth.go b/cmd/api/src/api/auth.go index aa060ec536..83e9c59dd7 100644 --- a/cmd/api/src/api/auth.go +++ b/cmd/api/src/api/auth.go @@ -475,7 +475,7 @@ func (s authenticator) ValidateSession(ctx context.Context, jwtTokenString strin return auth.Context{}, err } else if !token.Valid { - slog.InfoContext(ctx, fmt.Sprintf("Token invalid")) + slog.InfoContext(ctx, "Token invalid") return auth.Context{}, ErrInvalidAuth } else if sessionID, err := claims.SessionID(); err != nil { slog.InfoContext(ctx, fmt.Sprintf("Session ID %s invalid: %v", claims.Id, err)) diff --git a/cmd/api/src/api/middleware/logging.go b/cmd/api/src/api/middleware/logging.go index cc2ccc51aa..ef2833bf2c 100644 --- a/cmd/api/src/api/middleware/logging.go +++ b/cmd/api/src/api/middleware/logging.go @@ -96,17 +96,17 @@ func getSignedRequestDate(request *http.Request) 
(string, bool) { return requestDateHeader, requestDateHeader != "" } -func setSignedRequestFields(request *http.Request, logAttrs []slog.Attr) { +func setSignedRequestFields(request *http.Request, logAttrs *[]slog.Attr) { // Log the token ID and request date if the request contains either header if requestDateHeader, hasHeader := getSignedRequestDate(request); hasHeader { - logAttrs = append(logAttrs, slog.String("signed_request_date", requestDateHeader)) + *logAttrs = append(*logAttrs, slog.String("signed_request_date", requestDateHeader)) } if authScheme, schemeParameter, err := parseAuthorizationHeader(request); err == nil { switch authScheme { case api.AuthorizationSchemeBHESignature: if _, err := uuid.FromString(schemeParameter); err == nil { - logAttrs = append(logAttrs, slog.String("token_id", schemeParameter)) + *logAttrs = append(*logAttrs, slog.String("token_id", schemeParameter)) } } } @@ -158,7 +158,7 @@ func LoggingMiddleware(idResolver auth.IdentityResolver) func(http.Handler) http next.ServeHTTP(loggedResponse, request) // Log the token ID and request date if the request contains either header - setSignedRequestFields(request, logAttrs) + setSignedRequestFields(request, &logAttrs) // Add the fields that we care about before exiting logAttrs = append(logAttrs, diff --git a/cmd/api/src/api/middleware/middleware.go b/cmd/api/src/api/middleware/middleware.go index 344e5e3abd..28bde783f9 100644 --- a/cmd/api/src/api/middleware/middleware.go +++ b/cmd/api/src/api/middleware/middleware.go @@ -163,7 +163,7 @@ func parseUserIP(r *http.Request) string { } if result := r.Header.Get("X-Forwarded-For"); result == "" { - slog.DebugContext(r.Context(), fmt.Sprintf("No data found in X-Forwarded-For header")) + slog.DebugContext(r.Context(), "No data found in X-Forwarded-For header") return remoteIp } else { result += "," + remoteIp diff --git a/cmd/api/src/api/tools/pg.go b/cmd/api/src/api/tools/pg.go index e20eb9fa2c..a84168230a 100644 --- 
a/cmd/api/src/api/tools/pg.go +++ b/cmd/api/src/api/tools/pg.go @@ -291,7 +291,7 @@ func (s *PGMigrator) startMigration() error { go func(ctx context.Context) { defer migrationCancelFunc() - slog.InfoContext(ctx, fmt.Sprintf("Starting live migration from Neo4j to PostgreSQL")) + slog.InfoContext(ctx, "Starting live migration from Neo4j to PostgreSQL") if err := pgDB.AssertSchema(ctx, s.graphSchema); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Unable to assert graph schema in PostgreSQL: %v", err)) @@ -302,7 +302,7 @@ func (s *PGMigrator) startMigration() error { } else if err := migrateEdges(ctx, neo4jDB, pgDB, nodeIDMappings); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed importing edges into PostgreSQL: %v", err)) } else { - slog.InfoContext(ctx, fmt.Sprintf("Migration to PostgreSQL completed successfully")) + slog.InfoContext(ctx, "Migration to PostgreSQL completed successfully") } if err := s.advanceState(stateIdle, stateMigrating, stateCanceling); err != nil { diff --git a/cmd/api/src/api/v2/agi.go b/cmd/api/src/api/v2/agi.go index 82eae79f3c..b374b0160a 100644 --- a/cmd/api/src/api/v2/agi.go +++ b/cmd/api/src/api/v2/agi.go @@ -278,7 +278,7 @@ func (s Resources) UpdateAssetGroupSelectors(response http.ResponseWriter, reque // When T0 asset group selectors are modified, entire analysis must be re-run var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), "encountered request analysis for unknown user, this shouldn't happen") userId = "unknown-user-update-asset-group-selectors" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/analysisrequest.go b/cmd/api/src/api/v2/analysisrequest.go index 4ae6b07f36..8079d05363 100644 --- a/cmd/api/src/api/v2/analysisrequest.go +++ b/cmd/api/src/api/v2/analysisrequest.go @@ -19,7 +19,6 
@@ package v2 import ( "database/sql" "errors" - "fmt" "log/slog" "net/http" @@ -45,7 +44,7 @@ func (s Resources) RequestAnalysis(response http.ResponseWriter, request *http.R var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), "encountered request analysis for unknown user, this shouldn't happen") userId = "unknown-user" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/auth/oidc.go b/cmd/api/src/api/v2/auth/oidc.go index 176896216a..2f30474c69 100644 --- a/cmd/api/src/api/v2/auth/oidc.go +++ b/cmd/api/src/api/v2/auth/oidc.go @@ -228,7 +228,7 @@ func (s ManagementResource) OIDCCallbackHandler(response http.ResponseWriter, re slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] %v", err)) v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else if email, err := getEmailFromOIDCClaims(claims); errors.Is(err, ErrEmailMissing) { // Note email claims are not always present so we will check different claim keys for possible email - slog.ErrorContext(request.Context(), fmt.Sprintf("[OIDC] Claims did not contain any valid email address")) + slog.ErrorContext(request.Context(), "[OIDC] Claims did not contain any valid email address") v2.RedirectToLoginPage(response, request, "Your SSO was unable to authenticate your user, please contact your Administrator") } else { if ssoProvider.Config.AutoProvision.Enabled { diff --git a/cmd/api/src/api/v2/database_wipe.go b/cmd/api/src/api/v2/database_wipe.go index 22b84aa23f..5dcd72b9a2 100644 --- a/cmd/api/src/api/v2/database_wipe.go +++ b/cmd/api/src/api/v2/database_wipe.go @@ -140,7 +140,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt if kickoffAnalysis { var userId string if user, isUser 
:= auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), "encountered request analysis for unknown user, this shouldn't happen") userId = "unknown-user-database-wipe" } else { userId = user.ID.String() diff --git a/cmd/api/src/api/v2/flag.go b/cmd/api/src/api/v2/flag.go index b76405085c..9691fd439a 100644 --- a/cmd/api/src/api/v2/flag.go +++ b/cmd/api/src/api/v2/flag.go @@ -64,7 +64,7 @@ func (s Resources) ToggleFlag(response http.ResponseWriter, request *http.Reques if featureFlag.Key == appcfg.FeatureAdcs && !featureFlag.Enabled { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), "encountered request analysis for unknown user, this shouldn't happen") userId = "unknown-user-toggle-flag" } else { userId = user.ID.String() diff --git a/cmd/api/src/bootstrap/server.go b/cmd/api/src/bootstrap/server.go index 1b7eedb288..77b57af608 100644 --- a/cmd/api/src/bootstrap/server.go +++ b/cmd/api/src/bootstrap/server.go @@ -115,11 +115,11 @@ func MigrateDB(ctx context.Context, cfg config.Configuration, db database.Databa paddingString := strings.Repeat(" ", len(passwordMsg)-2) borderString := strings.Repeat("#", len(passwordMsg)) - slog.Info(fmt.Sprintf("%s", borderString)) + slog.Info(borderString) slog.Info(fmt.Sprintf("#%s#", paddingString)) - slog.Info(fmt.Sprintf("%s", passwordMsg)) + slog.Info(passwordMsg) slog.Info(fmt.Sprintf("#%s#", paddingString)) - slog.Info(fmt.Sprintf("%s", borderString)) + slog.Info("%s", borderString) } } diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index 54affb8ca6..6ed39c73e6 100644 --- 
a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -248,7 +248,7 @@ func (s *Daemon) processIngestTasks(ctx context.Context, ingestTasks model.Inges } if s.cfg.DisableIngest { - slog.WarnContext(ctx, fmt.Sprintf("Skipped processing of ingestTasks due to config flag.")) + slog.WarnContext(ctx, "Skipped processing of ingestTasks due to config flag.") return } diff --git a/cmd/api/src/database/log_test.go b/cmd/api/src/database/log_test.go index ac58685d78..c3678fdf76 100644 --- a/cmd/api/src/database/log_test.go +++ b/cmd/api/src/database/log_test.go @@ -18,6 +18,7 @@ package database_test import ( "bytes" + "context" "fmt" "log/slog" "strings" @@ -40,7 +41,7 @@ func TestGormLogAdapter_Info(t *testing.T) { slog.SetDefault(slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{ReplaceAttr: handlers.ReplaceMessageKey}))) expected := fmt.Sprintf(`message="message %d %s %f"`, 1, "arg", 2.0) - gormLogAdapter.Info(nil, "message %d %s %f", 1, "arg", 2.0) + gormLogAdapter.Info(context.TODO(), "message %d %s %f", 1, "arg", 2.0) if !strings.Contains(buf.String(), expected) { t.Errorf("gormLogAdapter output does not contain expected\nOutput:%sExpected:%s", buf.String(), expected) } } diff --git a/cmd/api/src/model/appcfg/parameter.go b/cmd/api/src/model/appcfg/parameter.go index 924fadd60f..65f8cc3e66 100644 --- a/cmd/api/src/model/appcfg/parameter.go +++ b/cmd/api/src/model/appcfg/parameter.go @@ -170,10 +170,10 @@ func GetPasswordExpiration(ctx context.Context, service ParameterService) time.D var expiration PasswordExpiration if cfg, err := service.GetConfigurationParameter(ctx, PasswordExpirationWindow); err != nil { - slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch password expiratio configuration; returning default values")) + slog.WarnContext(ctx, "Failed to fetch password expiration configuration; returning default values") return DefaultPasswordExpirationWindow } else if err := cfg.Map(&expiration); err != nil { -
slog.WarnContext(ctx, fmt.Sprintf("Invalid password expiration configuration supplied; returning default values")) + slog.WarnContext(ctx, "Invalid password expiration configuration supplied; returning default values") return DefaultPasswordExpirationWindow } @@ -194,9 +194,9 @@ func GetNeo4jParameters(ctx context.Context, service ParameterService) Neo4jPara } if neo4jParametersCfg, err := service.GetConfigurationParameter(ctx, Neo4jConfigs); err != nil { - slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch neo4j configuration; returning default values")) + slog.WarnContext(ctx, "Failed to fetch neo4j configuration; returning default values") } else if err = neo4jParametersCfg.Map(&result); err != nil { - slog.WarnContext(ctx, fmt.Sprintf("Invalid neo4j configuration supplied; returning default values")) + slog.WarnContext(ctx, "Invalid neo4j configuration supplied; returning default values") } return result @@ -212,7 +212,7 @@ func GetCitrixRDPSupport(ctx context.Context, service ParameterService) bool { var result CitrixRDPSupport if cfg, err := service.GetConfigurationParameter(ctx, CitrixRDPSupportKey); err != nil { - slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch CitrixRDPSupport configuration; returning default values")) + slog.WarnContext(ctx, "Failed to fetch CitrixRDPSupport configuration; returning default values") } else if err := cfg.Map(&result); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Invalid CitrixRDPSupport configuration supplied, %v. 
returning default values.", err)) } @@ -260,7 +260,7 @@ func GetPruneTTLParameters(ctx context.Context, service ParameterService) PruneT } if pruneTTLParametersCfg, err := service.GetConfigurationParameter(ctx, PruneTTL); err != nil { - slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch prune TTL configuration; returning default values")) + slog.WarnContext(ctx, "Failed to fetch prune TTL configuration; returning default values") } else if err = pruneTTLParametersCfg.Map(&result); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Invalid prune TTL configuration supplied; returning default values %+v", err)) } @@ -278,7 +278,7 @@ func GetReconciliationParameter(ctx context.Context, service ParameterService) b result := ReconciliationParameter{Enabled: true} if cfg, err := service.GetConfigurationParameter(ctx, ReconciliationKey); err != nil { - slog.WarnContext(ctx, fmt.Sprintf("Failed to fetch reconciliation configuration; returning default values")) + slog.WarnContext(ctx, "Failed to fetch reconciliation configuration; returning default values") } else if err := cfg.Map(&result); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Invalid reconciliation configuration supplied, %v. 
returning default values.", err)) } diff --git a/cmd/api/src/services/dataquality/dataquality.go b/cmd/api/src/services/dataquality/dataquality.go index 854255a788..58f1e3a099 100644 --- a/cmd/api/src/services/dataquality/dataquality.go +++ b/cmd/api/src/services/dataquality/dataquality.go @@ -37,7 +37,7 @@ type DataQualityData interface { } func SaveDataQuality(ctx context.Context, db DataQualityData, graphDB graph.Database) error { - slog.InfoContext(ctx, fmt.Sprintf("Started Data Quality Stats Collection")) + slog.InfoContext(ctx, "Started Data Quality Stats Collection") defer measure.ContextMeasure(ctx, slog.LevelInfo, "Successfully Completed Data Quality Stats Collection")() if stats, aggregation, err := ad.GraphStats(ctx, graphDB); err != nil { diff --git a/cmd/api/src/services/entrypoint.go b/cmd/api/src/services/entrypoint.go index 5607d2ac45..e5d78fc910 100644 --- a/cmd/api/src/services/entrypoint.go +++ b/cmd/api/src/services/entrypoint.go @@ -83,7 +83,7 @@ func Entrypoint(ctx context.Context, cfg config.Configuration, connections boots } else if err := connections.Graph.SetDefaultGraph(ctx, schema.DefaultGraph()); err != nil { return nil, fmt.Errorf("no default graph found but migrations are disabled per configuration: %w", err) } else { - slog.InfoContext(ctx, fmt.Sprintf("Database migrations are disabled per configuration")) + slog.InfoContext(ctx, "Database migrations are disabled per configuration") } if apiCache, err := cache.NewCache(cache.Config{MaxSize: cfg.MaxAPICacheSize}); err != nil { diff --git a/packages/go/analysis/ad/ad.go b/packages/go/analysis/ad/ad.go index 550f00d252..9f00111ad9 100644 --- a/packages/go/analysis/ad/ad.go +++ b/packages/go/analysis/ad/ad.go @@ -319,7 +319,7 @@ func createOrUpdateWellKnownLink(tx graph.Transaction, startNode *graph.Node, en // See CalculateCrossProductNodeSetsDoc.md for explaination of the specialGroups (Authenticated Users and Everyone) and why we treat them the way we do func 
CalculateCrossProductNodeSets(tx graph.Transaction, domainsid string, groupExpansions impact.PathAggregator, nodeSlices ...[]*graph.Node) cardinality.Duplex[uint64] { if len(nodeSlices) < 2 { - slog.Error(fmt.Sprintf("Cross products require at least 2 nodesets")) + slog.Error("Cross products require at least 2 nodesets") return cardinality.NewBitmap64() } diff --git a/packages/go/analysis/ad/post.go b/packages/go/analysis/ad/post.go index 4ad70e60a0..0b93e597b2 100644 --- a/packages/go/analysis/ad/post.go +++ b/packages/go/analysis/ad/post.go @@ -483,7 +483,7 @@ func FetchLocalGroupBitmapForComputer(tx graph.Transaction, computer graph.ID, s } func ExpandAllRDPLocalGroups(ctx context.Context, db graph.Database) (impact.PathAggregator, error) { - slog.InfoContext(ctx, fmt.Sprintf("Expanding all AD group and local group memberships")) + slog.InfoContext(ctx, "Expanding all AD group and local group memberships") return ResolveAllGroupMemberships(ctx, db, query.Not( query.Or( diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index 8e35335ff0..e552dd38f2 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -95,7 +95,7 @@ func (s PostProcessingStats) LogStats() { return } - slog.Debug(fmt.Sprintf("Relationships deleted before post-processing:")) + slog.Debug("Relationships deleted before post-processing:") for _, relationship := range statsSortedKeys(s.RelationshipsDeleted) { if numDeleted := s.RelationshipsDeleted[relationship]; numDeleted > 0 { @@ -103,7 +103,7 @@ func (s PostProcessingStats) LogStats() { } } - slog.Debug(fmt.Sprintf("Relationships created after post-processing:")) + slog.Debug("Relationships created after post-processing:") for _, relationship := range statsSortedKeys(s.RelationshipsCreated) { if numDeleted := s.RelationshipsCreated[relationship]; numDeleted > 0 { diff --git a/packages/go/analysis/post_operation.go b/packages/go/analysis/post_operation.go index dd022c18d1..aadc6c9f59 100644 --- 
a/packages/go/analysis/post_operation.go +++ b/packages/go/analysis/post_operation.go @@ -136,7 +136,7 @@ func (s *AtomicPostProcessingStats) LogStats() { return } - slog.Debug(fmt.Sprintf("Relationships deleted before post-processing:")) + slog.Debug("Relationships deleted before post-processing:") for _, relationship := range atomicStatsSortedKeys(s.RelationshipsDeleted) { if numDeleted := int(*s.RelationshipsDeleted[relationship]); numDeleted > 0 { @@ -144,7 +144,7 @@ func (s *AtomicPostProcessingStats) LogStats() { } } - slog.Debug(fmt.Sprintf("Relationships created after post-processing:")) + slog.Debug("Relationships created after post-processing:") for _, relationship := range atomicStatsSortedKeys(s.RelationshipsCreated) { if numCreated := int(*s.RelationshipsCreated[relationship]); numCreated > 0 { diff --git a/packages/go/conftool/main.go b/packages/go/conftool/main.go index 8eea312a3a..0ee98beee7 100644 --- a/packages/go/conftool/main.go +++ b/packages/go/conftool/main.go @@ -20,7 +20,6 @@ import ( "encoding/json" "flag" "fmt" - "log" "log/slog" "os" "time" @@ -46,7 +45,7 @@ func main() { defer configfile.Close() if !skipArgon2 { - log.Printf(fmt.Sprintf("Tuning Argon2 parameters to target %d milliseconds. This might take some time.", tuneMillis)) + slog.Info(fmt.Sprintf("Tuning Argon2 parameters to target %d milliseconds. 
This might take some time.", tuneMillis)) } if argon2Config, err := config.GenerateArgonSettings(time.Duration(tuneMillis), skipArgon2); err != nil { @@ -59,7 +58,7 @@ func main() { slog.Error(fmt.Sprintf("Could not write to config file %s: %v", path, err)) os.Exit(1) } else { - log.Printf(fmt.Sprintf("Successfully wrote to config file to %s", path)) + slog.Info(fmt.Sprintf("Successfully wrote to config file to %s", path)) } } } diff --git a/packages/go/stbernard/command/tester/tester.go b/packages/go/stbernard/command/tester/tester.go index 9aeeda1702..8a2602b305 100644 --- a/packages/go/stbernard/command/tester/tester.go +++ b/packages/go/stbernard/command/tester/tester.go @@ -106,7 +106,7 @@ func (s *command) runTests(cwd string, coverPath string, modPaths []string) erro } if !s.yarnOnly { - slog.Info(fmt.Sprintf("Checking coverage directory")) + slog.Info("Checking coverage directory") if err := os.MkdirAll(coverPath, os.ModeDir+fs.ModePerm); err != nil { return fmt.Errorf("making coverage directory: %w", err) } else if dirList, err := os.ReadDir(coverPath); err != nil { diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 31b4869e5e..45b3b49ccf 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -48,7 +48,7 @@ func main() { } if cmd, err := command.ParseCLI(env); errors.Is(err, command.ErrNoCmd) { - slog.Error(fmt.Sprintf("No valid command specified")) + slog.Error("No valid command specified") os.Exit(1) } else if errors.Is(err, command.ErrHelpRequested) { // No need to exit 1 if help was requested From 31742305c5d97ddb51d57192e837f5fe36a16f3d Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Thu, 9 Jan 2025 23:45:06 -0500 Subject: [PATCH 18/20] BED-4153: Resolve lint issues --- cmd/api/src/api/middleware/compression.go | 2 +- cmd/api/src/api/v2/database_wipe.go | 2 +- cmd/api/src/bootstrap/server.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/cmd/api/src/api/middleware/compression.go b/cmd/api/src/api/middleware/compression.go index a4e49cb5db..9353661ed8 100644 --- a/cmd/api/src/api/middleware/compression.go +++ b/cmd/api/src/api/middleware/compression.go @@ -65,7 +65,7 @@ func CompressionMiddleware(next http.Handler) http.Handler { request.Body, err = wrapBody(encoding, request.Body) if err != nil { errMsg := fmt.Sprintf("failed to create reader for %s encoding: %v", encoding, err) - slog.WarnContext(request.Context(), fmt.Sprintf(errMsg)) + slog.WarnContext(request.Context(), errMsg) if errors.Is(err, errUnsupportedEncoding) { api.WriteErrorResponse(request.Context(), api.BuildErrorResponse(http.StatusUnsupportedMediaType, fmt.Sprintf("Error trying to read request: %s", errMsg), request), responseWriter) } else { diff --git a/cmd/api/src/api/v2/database_wipe.go b/cmd/api/src/api/v2/database_wipe.go index 5dcd72b9a2..2dd1e96f10 100644 --- a/cmd/api/src/api/v2/database_wipe.go +++ b/cmd/api/src/api/v2/database_wipe.go @@ -112,7 +112,7 @@ func (s Resources) HandleDatabaseWipe(response http.ResponseWriter, request *htt } else { var userId string if user, isUser := auth.GetUserFromAuthCtx(ctx.FromRequest(request).AuthCtx); !isUser { - slog.WarnContext(request.Context(), fmt.Sprintf("encountered request analysis for unknown user, this shouldn't happen")) + slog.WarnContext(request.Context(), "encountered request analysis for unknown user, this shouldn't happen") userId = "unknown-user-database-wipe" } else { userId = user.ID.String() diff --git a/cmd/api/src/bootstrap/server.go b/cmd/api/src/bootstrap/server.go index 77b57af608..f2e3d415b5 100644 --- a/cmd/api/src/bootstrap/server.go +++ b/cmd/api/src/bootstrap/server.go @@ -119,7 +119,7 @@ func MigrateDB(ctx context.Context, cfg config.Configuration, db database.Databa slog.Info(fmt.Sprintf("#%s#", paddingString)) slog.Info(passwordMsg) slog.Info(fmt.Sprintf("#%s#", paddingString)) - slog.Info("%s", borderString) + slog.Info(borderString) } } From 
bced3c293eaf964fa726f2eb8a8e3df926cd800d Mon Sep 17 00:00:00 2001 From: Wes <169498386+wes-mil@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:24:43 -0500 Subject: [PATCH 19/20] BED-4153: Add convenience ConfigureDefault function --- cmd/api/src/api/v2/auth/auth_test.go | 4 +--- cmd/api/src/bootstrap/util.go | 3 +-- cmd/api/src/cmd/bhapi/main.go | 3 +-- cmd/api/src/cmd/dawgs-harness/main.go | 4 +--- packages/go/bhlog/bhlog.go | 5 +++++ packages/go/schemagen/main.go | 3 +-- packages/go/stbernard/main.go | 3 +-- 7 files changed, 11 insertions(+), 14 deletions(-) diff --git a/cmd/api/src/api/v2/auth/auth_test.go b/cmd/api/src/api/v2/auth/auth_test.go index 928c0edad0..86ca3ae0dc 100644 --- a/cmd/api/src/api/v2/auth/auth_test.go +++ b/cmd/api/src/api/v2/auth/auth_test.go @@ -22,7 +22,6 @@ import ( "database/sql" "encoding/json" "fmt" - "log/slog" "net/http" "net/http/httptest" "net/url" @@ -1227,8 +1226,7 @@ func TestCreateUser_ResetPassword(t *testing.T) { goodUserMap, } - logger := bhlog.NewDefaultLogger() - slog.SetDefault(logger) + bhlog.ConfigureDefault() ctx := context.WithValue(context.Background(), ctx.ValueKey, &ctx.Context{}) payload, err := json.Marshal(input.Body) diff --git a/cmd/api/src/bootstrap/util.go b/cmd/api/src/bootstrap/util.go index a94c808eee..d73a3e0f92 100644 --- a/cmd/api/src/bootstrap/util.go +++ b/cmd/api/src/bootstrap/util.go @@ -118,8 +118,7 @@ func InitializeLogging(cfg config.Configuration) error { } } - logger := bhlog.NewDefaultLogger() - slog.SetDefault(logger) + bhlog.ConfigureDefault() level.SetGlobalLevel(logLevel) slog.Info("Logging configured") diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index 2ad5f01449..70c9043f66 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -58,8 +58,7 @@ func main() { printVersion() } - logger := bhlog.NewDefaultLogger() - slog.SetDefault(logger) + bhlog.ConfigureDefault() if cfg, err := config.GetConfiguration(configFilePath, 
config.NewDefaultConfiguration); err != nil { slog.Error(fmt.Sprintf("Unable to read configuration %s: %v", configFilePath, err)) diff --git a/cmd/api/src/cmd/dawgs-harness/main.go b/cmd/api/src/cmd/dawgs-harness/main.go index 8f2a48ba99..e54affc66d 100644 --- a/cmd/api/src/cmd/dawgs-harness/main.go +++ b/cmd/api/src/cmd/dawgs-harness/main.go @@ -20,7 +20,6 @@ import ( "context" "flag" "fmt" - "log/slog" _ "net/http/pprof" "os" "os/signal" @@ -126,8 +125,7 @@ func main() { flag.StringVar(&pgConnectionStr, "pg", "user=bhe dbname=bhe password=bhe4eva host=localhost", "PostgreSQL connection string.") flag.Parse() - logger := bhlog.NewDefaultLogger() - slog.SetDefault(logger) + bhlog.ConfigureDefault() switch testType { case "both": diff --git a/packages/go/bhlog/bhlog.go b/packages/go/bhlog/bhlog.go index c996c2a64e..c04e9d8d39 100644 --- a/packages/go/bhlog/bhlog.go +++ b/packages/go/bhlog/bhlog.go @@ -29,6 +29,11 @@ import ( "github.com/specterops/bloodhound/src/auth" ) +func ConfigureDefault() { + logger := NewDefaultLogger() + slog.SetDefault(logger) +} + func NewDefaultLogger() *slog.Logger { return slog.New(&handlers.ContextHandler{ IDResolver: auth.NewIdentityResolver(), diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go index 4aa14848ea..8bd13e07d5 100644 --- a/packages/go/schemagen/main.go +++ b/packages/go/schemagen/main.go @@ -71,8 +71,7 @@ func GenerateSharedTypeScript(projectRoot string, rootSchema Schema) error { } func main() { - logger := bhlog.NewDefaultLogger() - slog.SetDefault(logger) + bhlog.ConfigureDefault() level.SetGlobalLevel(slog.LevelDebug) cfgBuilder := generator.NewConfigBuilder("/schemas") diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 45b3b49ccf..255aefaddb 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -34,8 +34,7 @@ func main() { env := environment.NewEnvironment() var rawLvl = env[environment.LogLevelVarName] - logger := 
bhlog.NewDefaultLogger() - slog.SetDefault(logger) + bhlog.ConfigureDefault() if rawLvl == "" { rawLvl = "warn" From b85373bcd6589b6673c41b61fb2c28b5fa095684 Mon Sep 17 00:00:00 2001 From: Alyx Holms Date: Fri, 10 Jan 2025 10:30:43 -0700 Subject: [PATCH 20/20] feat: wire up config for text logging --- cmd/api/src/api/v2/auth/auth_test.go | 2 +- cmd/api/src/bootstrap/util.go | 2 +- cmd/api/src/cmd/bhapi/main.go | 10 ++++++- cmd/api/src/cmd/dawgs-harness/main.go | 2 +- cmd/api/src/config/config.go | 5 ++-- cmd/api/src/config/default.go | 7 ++--- cmd/api/src/daemons/api/bhapi/api.go | 4 +-- cmd/api/src/daemons/api/toolapi/api.go | 4 +-- docker-compose.dev.yml | 5 ++++ packages/go/bhlog/bhlog.go | 36 ++++++++++++++++---------- packages/go/schemagen/main.go | 2 +- packages/go/stbernard/main.go | 2 +- 12 files changed, 53 insertions(+), 28 deletions(-) diff --git a/cmd/api/src/api/v2/auth/auth_test.go b/cmd/api/src/api/v2/auth/auth_test.go index 86ca3ae0dc..ab4e4d6e29 100644 --- a/cmd/api/src/api/v2/auth/auth_test.go +++ b/cmd/api/src/api/v2/auth/auth_test.go @@ -1226,7 +1226,7 @@ func TestCreateUser_ResetPassword(t *testing.T) { goodUserMap, } - bhlog.ConfigureDefault() + bhlog.ConfigureDefault(true) ctx := context.WithValue(context.Background(), ctx.ValueKey, &ctx.Context{}) payload, err := json.Marshal(input.Body) diff --git a/cmd/api/src/bootstrap/util.go b/cmd/api/src/bootstrap/util.go index d73a3e0f92..9f6db49526 100644 --- a/cmd/api/src/bootstrap/util.go +++ b/cmd/api/src/bootstrap/util.go @@ -118,7 +118,7 @@ func InitializeLogging(cfg config.Configuration) error { } } - bhlog.ConfigureDefault() + bhlog.ConfigureDefault(cfg.EnableTextLogger) level.SetGlobalLevel(logLevel) slog.Info("Logging configured") diff --git a/cmd/api/src/cmd/bhapi/main.go b/cmd/api/src/cmd/bhapi/main.go index 70c9043f66..0d3eae0a93 100644 --- a/cmd/api/src/cmd/bhapi/main.go +++ b/cmd/api/src/cmd/bhapi/main.go @@ -22,6 +22,7 @@ import ( "fmt" "log/slog" "os" + "strconv" 
"github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs/graph" @@ -58,7 +59,14 @@ func main() { printVersion() } - bhlog.ConfigureDefault() + enableTextLogger := os.Getenv(config.BHAPIEnvironmentVariablePrefix + "_enable_text_logger") + + if enabled, err := strconv.ParseBool(enableTextLogger); err != nil { + // Default to json because we're not sure what the user wanted + bhlog.ConfigureDefault(false) + } else { + bhlog.ConfigureDefault(enabled) + } if cfg, err := config.GetConfiguration(configFilePath, config.NewDefaultConfiguration); err != nil { slog.Error(fmt.Sprintf("Unable to read configuration %s: %v", configFilePath, err)) diff --git a/cmd/api/src/cmd/dawgs-harness/main.go b/cmd/api/src/cmd/dawgs-harness/main.go index e54affc66d..890c6ae84d 100644 --- a/cmd/api/src/cmd/dawgs-harness/main.go +++ b/cmd/api/src/cmd/dawgs-harness/main.go @@ -125,7 +125,7 @@ func main() { flag.StringVar(&pgConnectionStr, "pg", "user=bhe dbname=bhe password=bhe4eva host=localhost", "PostgreSQL connection string.") flag.Parse() - bhlog.ConfigureDefault() + bhlog.ConfigureDefault(true) switch testType { case "both": diff --git a/cmd/api/src/config/config.go b/cmd/api/src/config/config.go index b22eb1ce08..073e4da934 100644 --- a/cmd/api/src/config/config.go +++ b/cmd/api/src/config/config.go @@ -36,7 +36,7 @@ const ( CurrentConfigurationVersion = 2 DefaultLogFilePath = "/var/log/bhapi.log" - bhAPIEnvironmentVariablePrefix = "bhe" + BHAPIEnvironmentVariablePrefix = "bhe" environmentVariablePathSeparator = "_" environmentVariableKeyValueSeparator = "=" ) @@ -164,6 +164,7 @@ type Configuration struct { GraphQueryMemoryLimit uint16 `json:"graph_query_memory_limit"` AuthSessionTTLHours int `json:"auth_session_ttl_hours"` FedRAMPEULAText string `json:"fedramp_eula_text"` // Enterprise only + EnableTextLogger bool `json:"enable_text_logger"` } func (s Configuration) AuthSessionTTL() time.Duration { @@ -276,7 +277,7 @@ func getConfiguration(path string, 
defaultConfigFunc func() (Configuration, erro func GetConfiguration(path string, defaultConfigFunc func() (Configuration, error)) (Configuration, error) { if cfg, err := getConfiguration(path, defaultConfigFunc); err != nil { return cfg, err - } else if err := SetValuesFromEnv(bhAPIEnvironmentVariablePrefix, &cfg, os.Environ()); err != nil { + } else if err := SetValuesFromEnv(BHAPIEnvironmentVariablePrefix, &cfg, os.Environ()); err != nil { return cfg, err } else { return cfg, nil diff --git a/cmd/api/src/config/default.go b/cmd/api/src/config/default.go index 428f025c53..c1018bdc51 100644 --- a/cmd/api/src/config/default.go +++ b/cmd/api/src/config/default.go @@ -52,9 +52,10 @@ func NewDefaultConfiguration() (Configuration, error) { DisableIngest: false, DisableMigrations: false, EnableCypherMutations: false, - AuthSessionTTLHours: 8, // Default to a logged in auth session time to live of 8 hours - GraphQueryMemoryLimit: 2, // 2 GiB by default - FedRAMPEULAText: "", // Enterprise only + AuthSessionTTLHours: 8, // Default to a logged in auth session time to live of 8 hours + GraphQueryMemoryLimit: 2, // 2 GiB by default + FedRAMPEULAText: "", // Enterprise only + EnableTextLogger: false, // Default to JSON logging TLS: TLSConfiguration{}, SAML: SAMLConfiguration{}, GraphDriver: neo4j.DriverName, // Default to Neo4j as the graph driver diff --git a/cmd/api/src/daemons/api/bhapi/api.go b/cmd/api/src/daemons/api/bhapi/api.go index b0ca20beae..2f51be7ec3 100644 --- a/cmd/api/src/daemons/api/bhapi/api.go +++ b/cmd/api/src/daemons/api/bhapi/api.go @@ -20,10 +20,10 @@ import ( "context" "errors" "fmt" + "log" "log/slog" "net/http" - "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/src/config" ) @@ -40,7 +40,7 @@ func NewDaemon(cfg config.Configuration, handler http.Handler) Daemon { server: &http.Server{ Addr: cfg.BindAddress, Handler: handler, - ErrorLog: bhlog.NewLogLogger("BHAPI"), + ErrorLog: log.Default(), }, } } diff --git 
a/cmd/api/src/daemons/api/toolapi/api.go b/cmd/api/src/daemons/api/toolapi/api.go index b727593e6c..70a90d7bbf 100644 --- a/cmd/api/src/daemons/api/toolapi/api.go +++ b/cmd/api/src/daemons/api/toolapi/api.go @@ -20,13 +20,13 @@ import ( "context" "errors" "fmt" + "log" "log/slog" "net/http" "net/http/pprof" "github.com/go-chi/chi/v5" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/specterops/bloodhound/bhlog" "github.com/specterops/bloodhound/dawgs/graph" "github.com/specterops/bloodhound/src/api" "github.com/specterops/bloodhound/src/api/tools" @@ -101,7 +101,7 @@ func NewDaemon[DBType database.Database](ctx context.Context, connections bootst server: &http.Server{ Addr: cfg.MetricsPort, Handler: router, - ErrorLog: bhlog.NewLogLogger("ToolAPI"), + ErrorLog: log.Default(), }, } } diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index 0a788d4d75..903e09b01c 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -123,6 +123,11 @@ services: context: tools/docker-compose dockerfile: api.Dockerfile command: "-c .air.toml ${AIR_FLAGS:-''}" + environment: + bhe_disable_cypher_complexity_limit: ${bhe_disable_cypher_complexity_limit:-false} + bhe_enable_cypher_mutations: ${bhe_enable_cypher_mutations:-false} + bhe_graph_query_memory_limit: ${bhe_graph_query_memory_limit:-2} + bhe_enable_text_logger: ${bhe_enable_text_logger:-true} ports: - ${BH_API_PORT:-127.0.0.1:8080}:8080 - ${TOOLAPI_PORT:-127.0.0.1:2112}:2112 diff --git a/packages/go/bhlog/bhlog.go b/packages/go/bhlog/bhlog.go index c04e9d8d39..9300267dde 100644 --- a/packages/go/bhlog/bhlog.go +++ b/packages/go/bhlog/bhlog.go @@ -18,7 +18,7 @@ package bhlog import ( "fmt" - "log" + "io" "log/slog" "os" "runtime" @@ -29,23 +29,33 @@ import ( "github.com/specterops/bloodhound/src/auth" ) -func ConfigureDefault() { - logger := NewDefaultLogger() - slog.SetDefault(logger) +func BaseHandler(pipe io.Writer, options *slog.HandlerOptions) slog.Handler { + return 
slog.NewJSONHandler(pipe, options) +} + +func TextHandler(pipe io.Writer, options *slog.HandlerOptions) slog.Handler { + return slog.NewTextHandler(pipe, options) } -func NewDefaultLogger() *slog.Logger { - return slog.New(&handlers.ContextHandler{ +func ConfigureDefault(text bool) { + var ( + handler slog.Handler + pipe = os.Stderr + handlerOptions = &slog.HandlerOptions{Level: level.GetLevelVar(), ReplaceAttr: handlers.ReplaceMessageKey} + ) + + if text { + handler = TextHandler(pipe, handlerOptions) + } else { + handler = BaseHandler(pipe, handlerOptions) + } + + logger := slog.New(&handlers.ContextHandler{ IDResolver: auth.NewIdentityResolver(), - Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: level.GetLevelVar(), ReplaceAttr: handlers.ReplaceMessageKey}), + Handler: handler, }) -} -func NewLogLogger(origin string) *log.Logger { - return slog.NewLogLogger(&handlers.OriginHandler{ - Origin: origin, - Handler: slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: level.GetLevelVar()}), - }, slog.LevelError) + slog.SetDefault(logger) } type stackFrame struct { diff --git a/packages/go/schemagen/main.go b/packages/go/schemagen/main.go index 8bd13e07d5..f7a8166513 100644 --- a/packages/go/schemagen/main.go +++ b/packages/go/schemagen/main.go @@ -71,7 +71,7 @@ func GenerateSharedTypeScript(projectRoot string, rootSchema Schema) error { } func main() { - bhlog.ConfigureDefault() + bhlog.ConfigureDefault(true) level.SetGlobalLevel(slog.LevelDebug) cfgBuilder := generator.NewConfigBuilder("/schemas") diff --git a/packages/go/stbernard/main.go b/packages/go/stbernard/main.go index 255aefaddb..59fe9ff0ee 100755 --- a/packages/go/stbernard/main.go +++ b/packages/go/stbernard/main.go @@ -34,7 +34,7 @@ func main() { env := environment.NewEnvironment() var rawLvl = env[environment.LogLevelVarName] - bhlog.ConfigureDefault() + bhlog.ConfigureDefault(true) if rawLvl == "" { rawLvl = "warn"