diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e7e70e30b106..989d9cf38b8f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -171,6 +171,7 @@ * [10341](https://github.com/grafana/loki/pull/10341) **ashwanthgoli** Deprecate older index types and non-object stores - `aws-dynamo, gcp, gcp-columnkey, bigtable, bigtable-hashed, cassandra, grpc` * [10344](https://github.com/grafana/loki/pull/10344) **ashwanthgoli** Compactor: deprecate `-boltdb.shipper.compactor.` prefix in favor of `-compactor.`. * [10073](https://github.com/grafana/loki/pull/10073) **sandeepsukhani,salvacorts,vlad-diachenko** Support attaching structured metadata to log lines. +* [11151](https://github.com/grafana/loki/pull/11151) **ashwanthgoli**: Removes already deprecated configs: `ruler.evaluation-delay-duration`, `boltdb.shipper.compactor.deletion-mode`, `validation.enforce-metric-name` and flags with prefix `-boltdb.shipper.compactor.*`. ##### Fixes diff --git a/clients/cmd/promtail/main.go b/clients/cmd/promtail/main.go index af19ce624eb0b..257c5eef01bc7 100644 --- a/clients/cmd/promtail/main.go +++ b/clients/cmd/promtail/main.go @@ -107,7 +107,7 @@ func main() { exit(1) } serverCfg := &config.Config.ServerConfig.Config - serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, true, false) + serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, false) // Use Stderr instead of files for the klog. klog.SetOutput(os.Stderr) diff --git a/clients/pkg/logentry/stages/drop_test.go b/clients/pkg/logentry/stages/drop_test.go index 697e49388f269..a7e5ffcb5665f 100644 --- a/clients/pkg/logentry/stages/drop_test.go +++ b/clients/pkg/logentry/stages/drop_test.go @@ -42,7 +42,7 @@ func Test_dropStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true tests := []struct { diff --git a/clients/pkg/logentry/stages/labelallow_test.go b/clients/pkg/logentry/stages/labelallow_test.go index c968135af8b2f..a5cbcd8e3ce6b 100644 --- a/clients/pkg/logentry/stages/labelallow_test.go +++ b/clients/pkg/logentry/stages/labelallow_test.go @@ -16,7 +16,7 @@ func Test_addLabelStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true tests := []struct { diff --git a/clients/pkg/logentry/stages/labeldrop_test.go b/clients/pkg/logentry/stages/labeldrop_test.go index 394702a349a9f..215a7888f8c31 100644 --- a/clients/pkg/logentry/stages/labeldrop_test.go +++ b/clients/pkg/logentry/stages/labeldrop_test.go @@ -16,7 +16,7 @@ func Test_dropLabelStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true tests := []struct { diff --git a/clients/pkg/logentry/stages/multiline_test.go b/clients/pkg/logentry/stages/multiline_test.go index 315e7a1ad4fcb..33b71c8f5f023 100644 --- a/clients/pkg/logentry/stages/multiline_test.go +++ b/clients/pkg/logentry/stages/multiline_test.go @@ -20,7 +20,7 @@ func Test_multilineStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true mcfg := &MultilineConfig{Expression: 
ptrFromString("^START"), MaxWaitTime: ptrFromString("3s")} @@ -52,7 +52,7 @@ func Test_multilineStage_MultiStreams(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true mcfg := &MultilineConfig{Expression: ptrFromString("^START"), MaxWaitTime: ptrFromString("3s")} @@ -97,7 +97,7 @@ func Test_multilineStage_MaxWaitTime(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true maxWait := 2 * time.Second diff --git a/clients/pkg/logentry/stages/pack_test.go b/clients/pkg/logentry/stages/pack_test.go index c1bf7814f636a..b767f90a76063 100644 --- a/clients/pkg/logentry/stages/pack_test.go +++ b/clients/pkg/logentry/stages/pack_test.go @@ -106,7 +106,7 @@ func Test_packStage_Run(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) Debug = true tests := []struct { diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go index 7ffe2ddfc9318..1e8affb389a17 100644 --- a/clients/pkg/promtail/targets/lokipush/pushtarget.go +++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go @@ -80,7 +80,7 @@ func (t *PushTarget) run() error { // The logger registers a metric which will cause a duplicate registry panic unless we provide an empty registry // The metric created is for counting log lines and isn't likely to be missed. serverCfg := &t.config.Server - serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.NewRegistry(), true, false) + serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.NewRegistry(), false) // Set new registry for upcoming metric server // If not, it'll likely panic when the tool gets reloaded. diff --git a/clients/pkg/promtail/targets/windows/target_test.go b/clients/pkg/promtail/targets/windows/target_test.go index f1f786e3a6f6e..a9a692b21ecfc 100644 --- a/clients/pkg/promtail/targets/windows/target_test.go +++ b/clients/pkg/promtail/targets/windows/target_test.go @@ -28,7 +28,7 @@ func init() { // Enable debug logging cfg := &server.Config{} _ = cfg.LogLevel.Set("debug") - util_log.InitLogger(cfg, nil, true, false) + util_log.InitLogger(cfg, nil, false) } // Test that you can use to generate event logs locally. 
diff --git a/cmd/logql-analyzer/main.go b/cmd/logql-analyzer/main.go index b6e894a594229..5031dbad7d894 100644 --- a/cmd/logql-analyzer/main.go +++ b/cmd/logql-analyzer/main.go @@ -19,7 +19,7 @@ func main() { cfg := getConfig() util_log.InitLogger(&server.Config{ LogLevel: cfg.LogLevel, - }, prometheus.DefaultRegisterer, true, false) + }, prometheus.DefaultRegisterer, false) s, err := createServer(cfg, util_log.Logger) if err != nil { level.Error(util_log.Logger).Log("msg", "error while creating the server", "err", err) diff --git a/cmd/loki/main.go b/cmd/loki/main.go index 6346d222bb348..845104eee8de5 100644 --- a/cmd/loki/main.go +++ b/cmd/loki/main.go @@ -52,7 +52,7 @@ func main() { exit(1) } serverCfg := &config.Server - serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, config.UseBufferedLogger, config.UseSyncLogger) + serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, false) // Validate the config once both the config file has been loaded // and CLI flags parsed. diff --git a/cmd/querytee/main.go b/cmd/querytee/main.go index ca7fa29d35d86..9007dd6a3e3f2 100644 --- a/cmd/querytee/main.go +++ b/cmd/querytee/main.go @@ -30,7 +30,7 @@ func main() { util_log.InitLogger(&server.Config{ LogLevel: cfg.LogLevel, - }, prometheus.DefaultRegisterer, true, false) + }, prometheus.DefaultRegisterer, false) // Run the instrumentation server. registry := prometheus.NewRegistry() diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index fab1750c5c3f5..2548beab8109f 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -183,8 +183,7 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set [schema_config: ] # The compactor block configures the compactor component, which compacts index -# shards for performance. `-boltdb.shipper.compactor.` prefix is deprecated, -# please use `-compactor.` instead. +# shards for performance. [compactor: ] # The limits_config block configures global and per-tenant limits in Loki. @@ -2267,7 +2266,7 @@ Configures the chunk index schema and where it is stored. ### compactor -The `compactor` block configures the compactor component, which compacts index shards for performance. `-boltdb.shipper.compactor.` prefix is deprecated, please use `-compactor.` instead. +The `compactor` block configures the compactor component, which compacts index shards for performance. ```yaml # Directory where files can be downloaded for compaction. @@ -2432,9 +2431,6 @@ compactor_ring: # -compactor.tables-to-compact, this is useful when clearing compactor backlogs. # CLI flag: -compactor.skip-latest-n-tables [skip_latest_n_tables: | default = 0] - -# Deprecated: Use deletion_mode per tenant configuration instead. -[deletion_mode: | default = ""] ``` ### bloom_compactor @@ -2590,10 +2586,6 @@ The `limits_config` block configures global and per-tenant limits in Loki. # CLI flag: -validation.create-grace-period [creation_grace_period: | default = 10m] -# Enforce every sample has a metric name. -# CLI flag: -validation.enforce-metric-name -[enforce_metric_name: | default = true] - # Maximum line size on ingestion path. Example: 256kb. Any log line exceeding # this limit will be discarded unless `distributor.max-line-size-truncate` is # set which in case it is truncated instead of discarding it completely. There @@ -2760,11 +2752,6 @@ The `limits_config` block configures global and per-tenant limits in Loki. 
# CLI flag: -limits.volume-max-series [volume_max_series: | default = 1000] -# Deprecated. Duration to delay the evaluation of rules to ensure the underlying -# metrics have been pushed to Cortex. -# CLI flag: -ruler.evaluation-delay-duration -[ruler_evaluation_delay_duration: | default = 0s] - # Maximum number of rules per rule group per-tenant. 0 to disable. # CLI flag: -ruler.max-rules-per-rule-group [ruler_max_rules_per_rule_group: | default = 0] diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index c33f0529b9747..e76a3d1b191df 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -110,7 +110,7 @@ The previous default value `false` is applied. #### Deprecated configuration options are removed -1. Removed already deprecated `store.max-look-back-period` CLI flag and the corresponding YAML settings. Use. Use `querier.max-query-lookback` config instead. +1. Removed already deprecated `store.max-look-back-period` CLI flag and the corresponding YAML settings. Use `querier.max-query-lookback` config instead. 1. Removes already deprecated `-querier.engine.timeout` CLI flag and the corresponding YAML setting. 1. Also removes the `query_timeout` from the querier YAML section. Instead of configuring `query_timeout` under `querier`, you now configure it in [Limits Config](/docs/loki/latest/configuration/#limits_config). 1. `s3.sse-encryption` is removed. AWS now defaults encryption of all buckets to SSE-S3. Use `sse.type` to set SSE type. @@ -121,6 +121,10 @@ The previous default value `false` is applied. 1. `frontend.cache-split-interval` CLI flag is removed. Results caching interval is now determined by `querier.split-queries-by-interval`. 1. `querier.worker-parallelism` CLI flag and its corresponding yaml setting are now removed as it does not offer additional value to already existing `querier.max-concurrent`. We recommend configuring `querier.max-concurrent` to limit the max concurrent requests processed by the queriers. +1. `ruler.evaluation-delay-duration` CLI flag and the corresponding YAML setting are removed. +1. `validation.enforce-metric-name` CLI flag and the corresponding YAML setting are removed. +1. `boltdb.shipper.compactor.deletion-mode` CLI flag and the corresponding YAML setting are removed. You can instead configure the `compactor.deletion-mode` CLI flag or `deletion_mode` YAML setting in [Limits Config](/docs/loki/latest/configuration/#limits_config). +1. Compactor CLI flags that use the prefix `boltdb.shipper.compactor.` are removed. You can instead use CLI flags with the `compactor.` prefix. #### Legacy ingester shutdown handler is removed diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index 4fd9852234022..048526a789afb 100644 --- a/integration/loki_micro_services_test.go +++ b/integration/loki_micro_services_test.go @@ -33,10 +33,10 @@ func TestMicroServicesIngestQuery(t *testing.T) { tCompactor = clu.AddComponent( "compactor", "-target=compactor", - "-boltdb.shipper.compactor.compaction-interval=1s", - "-boltdb.shipper.compactor.retention-delete-delay=1s", + "-compactor.compaction-interval=1s", + "-compactor.retention-delete-delay=1s", // By default, a minute is added to the delete request start time. This compensates for that. 
- "-boltdb.shipper.compactor.delete-request-cancel-period=-60s", + "-compactor.delete-request-cancel-period=-60s", "-compactor.deletion-mode=filter-and-delete", ) tIndexGateway = clu.AddComponent( @@ -161,7 +161,7 @@ func TestMicroServicesIngestQueryWithSchemaChange(t *testing.T) { tCompactor = clu.AddComponent( "compactor", "-target=compactor", - "-boltdb.shipper.compactor.compaction-interval=1s", + "-compactor.compaction-interval=1s", ) tDistributor = clu.AddComponent( "distributor", @@ -354,7 +354,7 @@ func TestMicroServicesIngestQueryOverMultipleBucketSingleProvider(t *testing.T) tCompactor = clu.AddComponent( "compactor", "-target=compactor", - "-boltdb.shipper.compactor.compaction-interval=1s", + "-compactor.compaction-interval=1s", ) tDistributor = clu.AddComponent( "distributor", @@ -472,10 +472,10 @@ func TestSchedulerRing(t *testing.T) { tCompactor = clu.AddComponent( "compactor", "-target=compactor", - "-boltdb.shipper.compactor.compaction-interval=1s", - "-boltdb.shipper.compactor.retention-delete-delay=1s", + "-compactor.compaction-interval=1s", + "-compactor.retention-delete-delay=1s", // By default, a minute is added to the delete request start time. This compensates for that. - "-boltdb.shipper.compactor.delete-request-cancel-period=-60s", + "-compactor.delete-request-cancel-period=-60s", "-compactor.deletion-mode=filter-and-delete", ) tIndexGateway = clu.AddComponent( @@ -592,10 +592,10 @@ func TestOTLPLogsIngestQuery(t *testing.T) { tCompactor = clu.AddComponent( "compactor", "-target=compactor", - "-boltdb.shipper.compactor.compaction-interval=1s", - "-boltdb.shipper.compactor.retention-delete-delay=1s", + "-compactor.compaction-interval=1s", + "-compactor.retention-delete-delay=1s", // By default, a minute is added to the delete request start time. This compensates for that. - "-boltdb.shipper.compactor.delete-request-cancel-period=-60s", + "-compactor.delete-request-cancel-period=-60s", "-compactor.deletion-mode=filter-and-delete", ) tIndexGateway = clu.AddComponent( @@ -745,7 +745,7 @@ func TestCategorizedLabels(t *testing.T) { tCompactor = clu.AddComponent( "compactor", "-target=compactor", - "-boltdb.shipper.compactor.compaction-interval=1s", + "-compactor.compaction-interval=1s", "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), ) ) diff --git a/pkg/compactor/client/http.go b/pkg/compactor/client/http.go index b5026a8194920..ea30094055519 100644 --- a/pkg/compactor/client/http.go +++ b/pkg/compactor/client/http.go @@ -30,7 +30,7 @@ type HTTPConfig struct { // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *HTTPConfig) RegisterFlags(f *flag.FlagSet) { - prefix := "boltdb.shipper.compactor.client" + prefix := "compactor.client" f.BoolVar(&cfg.TLSEnabled, prefix+".tls-enabled", false, "Enable TLS in the HTTP client. This flag needs to be enabled when any other TLS flag is set. 
If set to false, insecure connection to HTTP server will be used.") cfg.TLS.RegisterFlagsWithPrefix(prefix, f) diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 53a8c8c4c6593..a45248af6bbd3 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -12,7 +12,6 @@ import ( "time" "github.com/go-kit/log/level" - "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" @@ -89,47 +88,29 @@ type Config struct { RunOnce bool `yaml:"_" doc:"hidden"` TablesToCompact int `yaml:"tables_to_compact"` SkipLatestNTables int `yaml:"skip_latest_n_tables"` - - // Deprecated - DeletionMode string `yaml:"deletion_mode" doc:"deprecated|description=Use deletion_mode per tenant configuration instead."` -} - -// RegisterFlags registers flags. -func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - var deprecated = "" - if prefix != "" { - deprecated = "Deprecated: " - } - - f.StringVar(&cfg.WorkingDirectory, prefix+"compactor.working-directory", "", deprecated+"Directory where files can be downloaded for compaction.") - f.DurationVar(&cfg.CompactionInterval, prefix+"compactor.compaction-interval", 10*time.Minute, deprecated+"Interval at which to re-run the compaction operation.") - f.DurationVar(&cfg.ApplyRetentionInterval, prefix+"compactor.apply-retention-interval", 0, deprecated+"Interval at which to apply/enforce retention. 0 means run at same interval as compaction. If non-zero, it should always be a multiple of compaction interval.") - f.DurationVar(&cfg.RetentionDeleteDelay, prefix+"compactor.retention-delete-delay", 2*time.Hour, deprecated+"Delay after which chunks will be fully deleted during retention.") - f.BoolVar(&cfg.RetentionEnabled, prefix+"compactor.retention-enabled", false, deprecated+"(Experimental) Activate custom (per-stream,per-tenant) retention.") - f.IntVar(&cfg.RetentionDeleteWorkCount, prefix+"compactor.retention-delete-worker-count", 150, deprecated+"The total amount of worker to use to delete chunks.") - f.StringVar(&cfg.DeleteRequestStore, prefix+"compactor.delete-request-store", "", deprecated+"Store used for managing delete requests.") - f.IntVar(&cfg.DeleteBatchSize, prefix+"compactor.delete-batch-size", 70, deprecated+"The max number of delete requests to run per compaction cycle.") - f.DurationVar(&cfg.DeleteRequestCancelPeriod, prefix+"compactor.delete-request-cancel-period", 24*time.Hour, deprecated+"Allow cancellation of delete request until duration after they are created. Data would be deleted only after delete requests have been older than this duration. Ideally this should be set to at least 24h.") - f.DurationVar(&cfg.DeleteMaxInterval, prefix+"compactor.delete-max-interval", 24*time.Hour, deprecated+"Constrain the size of any single delete request. When a delete request > delete_max_interval is input, the request is sharded into smaller requests of no more than delete_max_interval") - f.DurationVar(&cfg.RetentionTableTimeout, prefix+"compactor.retention-table-timeout", 0, deprecated+"The maximum amount of time to spend running retention and deletion on any given table in the index.") - f.IntVar(&cfg.MaxCompactionParallelism, prefix+"compactor.max-compaction-parallelism", 1, deprecated+"Maximum number of tables to compact in parallel. 
While increasing this value, please make sure compactor has enough disk space allocated to be able to store and compact as many tables.") - f.IntVar(&cfg.UploadParallelism, prefix+"compactor.upload-parallelism", 10, deprecated+"Number of upload/remove operations to execute in parallel when finalizing a compaction. NOTE: This setting is per compaction operation, which can be executed in parallel. The upper bound on the number of concurrent uploads is upload_parallelism * max_compaction_parallelism.") - f.BoolVar(&cfg.RunOnce, prefix+"compactor.run-once", false, deprecated+"Run the compactor one time to cleanup and compact index files only (no retention applied)") - - cfg.CompactorRing.RegisterFlagsWithPrefix(prefix+"compactor.", "collectors/", f) - f.IntVar(&cfg.TablesToCompact, prefix+"compactor.tables-to-compact", 0, deprecated+"Number of tables that compactor will try to compact. Newer tables are chosen when this is less than the number of tables available.") - f.IntVar(&cfg.SkipLatestNTables, prefix+"compactor.skip-latest-n-tables", 0, deprecated+"Do not compact N latest tables. Together with -compactor.run-once and -compactor.tables-to-compact, this is useful when clearing compactor backlogs.") } // RegisterFlags registers flags. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("", f) + f.StringVar(&cfg.WorkingDirectory, "compactor.working-directory", "", "Directory where files can be downloaded for compaction.") + f.DurationVar(&cfg.CompactionInterval, "compactor.compaction-interval", 10*time.Minute, "Interval at which to re-run the compaction operation.") + f.DurationVar(&cfg.ApplyRetentionInterval, "compactor.apply-retention-interval", 0, "Interval at which to apply/enforce retention. 0 means run at same interval as compaction. If non-zero, it should always be a multiple of compaction interval.") + f.DurationVar(&cfg.RetentionDeleteDelay, "compactor.retention-delete-delay", 2*time.Hour, "Delay after which chunks will be fully deleted during retention.") + f.BoolVar(&cfg.RetentionEnabled, "compactor.retention-enabled", false, "(Experimental) Activate custom (per-stream,per-tenant) retention.") + f.IntVar(&cfg.RetentionDeleteWorkCount, "compactor.retention-delete-worker-count", 150, "The total amount of worker to use to delete chunks.") + f.StringVar(&cfg.DeleteRequestStore, "compactor.delete-request-store", "", "Store used for managing delete requests.") f.StringVar(&cfg.DeleteRequestStoreKeyPrefix, "compactor.delete-request-store.key-prefix", "index/", "Path prefix for storing delete requests.") - - // Deprecated. CLI flags with boltdb.shipper. prefix will be removed in the next major version. - cfg.RegisterFlagsWithPrefix("boltdb.shipper.", f) - // Deprecated - flagext.DeprecatedFlag(f, "boltdb.shipper.compactor.deletion-mode", "Deprecated. This has been moved to the deletion_mode per tenant configuration.", util_log.Logger) + f.IntVar(&cfg.DeleteBatchSize, "compactor.delete-batch-size", 70, "The max number of delete requests to run per compaction cycle.") + f.DurationVar(&cfg.DeleteRequestCancelPeriod, "compactor.delete-request-cancel-period", 24*time.Hour, "Allow cancellation of delete request until duration after they are created. Data would be deleted only after delete requests have been older than this duration. Ideally this should be set to at least 24h.") + f.DurationVar(&cfg.DeleteMaxInterval, "compactor.delete-max-interval", 24*time.Hour, "Constrain the size of any single delete request. 
When a delete request > delete_max_interval is input, the request is sharded into smaller requests of no more than delete_max_interval") + f.DurationVar(&cfg.RetentionTableTimeout, "compactor.retention-table-timeout", 0, "The maximum amount of time to spend running retention and deletion on any given table in the index.") + f.IntVar(&cfg.MaxCompactionParallelism, "compactor.max-compaction-parallelism", 1, "Maximum number of tables to compact in parallel. While increasing this value, please make sure compactor has enough disk space allocated to be able to store and compact as many tables.") + f.IntVar(&cfg.UploadParallelism, "compactor.upload-parallelism", 10, "Number of upload/remove operations to execute in parallel when finalizing a compaction. NOTE: This setting is per compaction operation, which can be executed in parallel. The upper bound on the number of concurrent uploads is upload_parallelism * max_compaction_parallelism.") + f.BoolVar(&cfg.RunOnce, "compactor.run-once", false, "Run the compactor one time to cleanup and compact index files only (no retention applied)") + f.IntVar(&cfg.TablesToCompact, "compactor.tables-to-compact", 0, "Number of tables that compactor will try to compact. Newer tables are chosen when this is less than the number of tables available.") + f.IntVar(&cfg.SkipLatestNTables, "compactor.skip-latest-n-tables", 0, "Do not compact N latest tables. Together with -compactor.run-once and -compactor.tables-to-compact, this is useful when clearing compactor backlogs.") + + cfg.CompactorRing.RegisterFlagsWithPrefix("compactor.", "collectors/", f) } // Validate verifies the config does not contain inappropriate values @@ -152,10 +133,6 @@ func (cfg *Config) Validate() error { } } - if cfg.DeletionMode != "" { - level.Warn(util_log.Logger).Log("msg", "boltdb.shipper.compactor.deletion-mode has been deprecated and will be ignored. 
This has been moved to the deletion_mode per tenant configuration.") - } - return nil } diff --git a/pkg/compactor/retention/util_test.go b/pkg/compactor/retention/util_test.go index eb17624bd59a6..bb2f0fe2e0a77 100644 --- a/pkg/compactor/retention/util_test.go +++ b/pkg/compactor/retention/util_test.go @@ -332,7 +332,7 @@ func newTestStore(t testing.TB) *testStore { t.Helper() servercfg := &ww.Config{} require.Nil(t, servercfg.LogLevel.Set("debug")) - util_log.InitLogger(servercfg, nil, true, false) + util_log.InitLogger(servercfg, nil, false) workdir := t.TempDir() filepath.Join(workdir, "index") indexDir := filepath.Join(workdir, "index") diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 32aa189ab1d63..e7899f7ea593c 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -98,7 +98,6 @@ func TestDistributor(t *testing.T) { t.Run(fmt.Sprintf("[%d](lines=%v)", i, tc.lines), func(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) - limits.EnforceMetricName = false limits.IngestionRateMB = ingestionRateLimit limits.IngestionBurstSizeMB = ingestionRateLimit limits.MaxLineSize = fe.ByteSize(tc.maxLineSize) @@ -494,7 +493,6 @@ func TestDistributorPushErrors(t *testing.T) { func Test_SortLabelsOnPush(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) - limits.EnforceMetricName = false ingester := &mockIngester{} distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil }) @@ -510,7 +508,6 @@ func Test_TruncateLogLines(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) - limits.EnforceMetricName = false limits.MaxLineSize = 5 limits.MaxLineSizeTruncate = true return limits, &mockIngester{} @@ -778,7 +775,6 @@ func BenchmarkShardStream(b *testing.B) { func Benchmark_SortLabelsOnPush(b *testing.B) { limits := &validation.Limits{} flagext.DefaultValues(limits) - limits.EnforceMetricName = false distributors, _ := prepare(&testing.T{}, 1, 5, limits, nil) d := distributors[0] request := makeWriteRequest(10, 10) @@ -799,7 +795,6 @@ func Benchmark_Push(b *testing.B) { limits.IngestionBurstSizeMB = math.MaxInt32 limits.CardinalityLimit = math.MaxInt32 limits.IngestionRateMB = math.MaxInt32 - limits.EnforceMetricName = false limits.MaxLineSize = math.MaxInt32 limits.RejectOldSamples = true limits.RejectOldSamplesMaxAge = model.Duration(24 * time.Hour) @@ -972,7 +967,6 @@ func TestShardCountFor(t *testing.T) { t.Run(tc.name, func(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) - limits.EnforceMetricName = false limits.ShardStreams.DesiredRate = tc.desiredRate d := &Distributor{ @@ -1064,7 +1058,6 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { t.Run(testName, func(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) - limits.EnforceMetricName = false limits.IngestionRateStrategy = testData.ingestionRateStrategy limits.IngestionRateMB = testData.ingestionRateMB limits.IngestionBurstSizeMB = testData.ingestionBurstSizeMB diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go index 761ec8b2aa97b..add9d17708dbd 100644 --- a/pkg/distributor/limits.go +++ b/pkg/distributor/limits.go @@ -12,7 +12,6 @@ type Limits interface { retention.Limits MaxLineSize(userID string) int MaxLineSizeTruncate(userID string) bool - EnforceMetricName(userID string) bool MaxLabelNamesPerSeries(userID string) int MaxLabelNameLength(userID 
string) int MaxLabelValueLength(userID string) int diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index cbfbda3b7cfa8..9c7ba87766513 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -73,11 +73,6 @@ type Config struct { HTTPPrefix string `yaml:"http_prefix" doc:"hidden"` BallastBytes int `yaml:"ballast_bytes"` - // TODO(dannyk): Remove these config options before next release; they don't need to be configurable. - // These are only here to allow us to test the new functionality. - UseBufferedLogger bool `yaml:"use_buffered_logger" doc:"hidden"` - UseSyncLogger bool `yaml:"use_sync_logger" doc:"hidden"` - Server server.Config `yaml:"server,omitempty"` InternalServer internalserver.Config `yaml:"internal_server,omitempty" doc:"hidden"` Distributor distributor.Config `yaml:"distributor,omitempty"` @@ -140,8 +135,6 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { "The ballast will not consume physical memory, because it is never read from. "+ "It will, however, distort metrics, because it is counted as live memory. ", ) - f.BoolVar(&c.UseBufferedLogger, "log.use-buffered", true, "Deprecated. Uses a line-buffered logger to improve performance.") - f.BoolVar(&c.UseSyncLogger, "log.use-sync", true, "Deprecated. Forces all lines logged to hold a mutex to serialize writes.") //TODO(trevorwhitney): flip this to false with Loki 3.0 f.BoolVar(&c.LegacyReadTarget, "legacy-read-mode", true, "Set to false to disable the legacy read mode and use new scalable mode with 3rd backend target. "+ diff --git a/pkg/ruler/base/compat.go b/pkg/ruler/base/compat.go index 35d40c3f44b9a..b29417aeb8cc9 100644 --- a/pkg/ruler/base/compat.go +++ b/pkg/ruler/base/compat.go @@ -15,7 +15,6 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" - "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" @@ -35,12 +34,11 @@ type PusherAppender struct { failedWrites prometheus.Counter totalWrites prometheus.Counter - ctx context.Context - pusher Pusher - labels []labels.Labels - samples []logproto.LegacySample - userID string - evaluationDelay time.Duration + ctx context.Context + pusher Pusher + labels []labels.Labels + samples []logproto.LegacySample + userID string } var _ storage.Appender = (*PusherAppender)(nil) @@ -48,18 +46,6 @@ var _ storage.Appender = (*PusherAppender)(nil) func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.labels = append(a.labels, l) - // Adapt staleness markers for ruler evaluation delay. As the upstream code - // is using the actual time, when there is a no longer available series. - // This then causes 'out of order' append failures once the series is - // becoming available again. - // see https://github.com/prometheus/prometheus/blob/6c56a1faaaad07317ff585bda75b99bdba0517ad/rules/manager.go#L647-L660 - // Similar to staleness markers, the rule manager also appends actual time to the ALERTS and ALERTS_FOR_STATE series. 
- // See: https://github.com/prometheus/prometheus/blob/ae086c73cb4d6db9e8b67d5038d3704fea6aec4a/rules/alerting.go#L414-L417 - metricName := l.Get(labels.MetricName) - if a.evaluationDelay > 0 && (value.IsStaleNaN(v) || metricName == "ALERTS" || metricName == "ALERTS_FOR_STATE") { - t -= a.evaluationDelay.Milliseconds() - } - a.samples = append(a.samples, logproto.LegacySample{ TimestampMs: t, Value: v, @@ -105,19 +91,17 @@ func (a *PusherAppender) Rollback() error { // PusherAppendable fulfills the storage.Appendable interface for prometheus manager type PusherAppendable struct { - pusher Pusher - userID string - rulesLimits RulesLimits + pusher Pusher + userID string totalWrites prometheus.Counter failedWrites prometheus.Counter } -func NewPusherAppendable(pusher Pusher, userID string, limits RulesLimits, totalWrites, failedWrites prometheus.Counter) *PusherAppendable { +func NewPusherAppendable(pusher Pusher, userID string, totalWrites, failedWrites prometheus.Counter) *PusherAppendable { return &PusherAppendable{ pusher: pusher, userID: userID, - rulesLimits: limits, totalWrites: totalWrites, failedWrites: failedWrites, } @@ -129,34 +113,20 @@ func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { failedWrites: t.failedWrites, totalWrites: t.totalWrites, - ctx: ctx, - pusher: t.pusher, - userID: t.userID, - evaluationDelay: t.rulesLimits.EvaluationDelay(t.userID), + ctx: ctx, + pusher: t.pusher, + userID: t.userID, } } // RulesLimits defines limits used by Ruler. type RulesLimits interface { - EvaluationDelay(userID string) time.Duration RulerTenantShardSize(userID string) int RulerMaxRuleGroupsPerTenant(userID string) int RulerMaxRulesPerRuleGroup(userID string) int RulerAlertManagerConfig(userID string) *config.AlertManagerConfig } -// EngineQueryFunc returns a new query function using the rules.EngineQueryFunc function -// and passing an altered timestamp. -func EngineQueryFunc(engine *promql.Engine, q storage.Queryable, overrides RulesLimits, userID string) rules.QueryFunc { - return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - orig := rules.EngineQueryFunc(engine, q) - // Delay the evaluation of all rules by a set interval to give a buffer - // to metric that haven't been forwarded to cortex yet. - evaluationDelay := overrides.EvaluationDelay(userID) - return orig(ctx, qs, t.Add(-evaluationDelay)) - } -} - func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Counter) rules.QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { queries.Inc() @@ -234,7 +204,7 @@ type RulesManager interface { // ManagerFactory is a function that creates new RulesManager for given user and notifier.Manager. 
type ManagerFactory func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager -func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engine *promql.Engine, overrides RulesLimits, reg prometheus.Registerer, metricsNamespace string) ManagerFactory { +func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engine *promql.Engine, reg prometheus.Registerer, metricsNamespace string) ManagerFactory { totalWrites := promauto.With(reg).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, Name: "ruler_write_requests_total", @@ -277,9 +247,9 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi } return rules.NewManager(&rules.ManagerOptions{ - Appendable: NewPusherAppendable(p, userID, overrides, totalWrites, failedWrites), + Appendable: NewPusherAppendable(p, userID, totalWrites, failedWrites), Queryable: q, - QueryFunc: RecordAndReportRuleQueryMetrics(MetricsQueryFunc(EngineQueryFunc(engine, q, overrides, userID), totalQueries, failedQueries), queryTime, logger), + QueryFunc: RecordAndReportRuleQueryMetrics(MetricsQueryFunc(rules.EngineQueryFunc(engine, q), totalQueries, failedQueries), queryTime, logger), Context: user.InjectOrgID(ctx, userID), ExternalURL: cfg.ExternalURL.URL, NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String(), cfg.DatasourceUID), diff --git a/pkg/ruler/base/compat_test.go b/pkg/ruler/base/compat_test.go index ba5242b43edea..d4cdf4f298a34 100644 --- a/pkg/ruler/base/compat_test.go +++ b/pkg/ruler/base/compat_test.go @@ -34,72 +34,41 @@ func (p *fakePusher) Push(_ context.Context, r *logproto.WriteRequest) (*logprot func TestPusherAppendable(t *testing.T) { pusher := &fakePusher{} - pa := NewPusherAppendable(pusher, "user-1", nil, prometheus.NewCounter(prometheus.CounterOpts{}), prometheus.NewCounter(prometheus.CounterOpts{})) + pa := NewPusherAppendable(pusher, "user-1", prometheus.NewCounter(prometheus.CounterOpts{}), prometheus.NewCounter(prometheus.CounterOpts{})) for _, tc := range []struct { name string series string - evalDelay time.Duration value float64 expectedTS int64 }{ { - name: "tenant without delay, normal value", + name: "tenant, normal value", series: "foo_bar", value: 1.234, expectedTS: 120_000, }, { - name: "tenant without delay, stale nan value", + name: "tenant, stale nan value", series: "foo_bar", value: math.Float64frombits(value.StaleNaN), expectedTS: 120_000, }, { - name: "tenant with delay, normal value", - series: "foo_bar", - value: 1.234, - expectedTS: 120_000, - evalDelay: time.Minute, - }, - { - name: "tenant with delay, stale nan value", - value: math.Float64frombits(value.StaleNaN), - expectedTS: 60_000, - evalDelay: time.Minute, - }, - { - name: "ALERTS without delay, normal value", + name: "ALERTS, normal value", series: `ALERTS{alertname="boop"}`, value: 1.234, expectedTS: 120_000, }, { - name: "ALERTS without delay, stale nan value", + name: "ALERTS, stale nan value", series: `ALERTS{alertname="boop"}`, value: math.Float64frombits(value.StaleNaN), expectedTS: 120_000, }, - { - name: "ALERTS with delay, normal value", - series: `ALERTS{alertname="boop"}`, - value: 1.234, - expectedTS: 60_000, - evalDelay: time.Minute, - }, - { - name: "ALERTS with delay, stale nan value", - series: `ALERTS_FOR_STATE{alertname="boop"}`, - value: math.Float64frombits(value.StaleNaN), - expectedTS: 60_000, - evalDelay: time.Minute, - }, } { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() 
- pa.rulesLimits = &ruleLimits{ - evalDelay: tc.evalDelay, - } lbls, err := parser.ParseMetric(tc.series) require.NoError(t, err) @@ -154,7 +123,7 @@ func TestPusherErrors(t *testing.T) { writes := prometheus.NewCounter(prometheus.CounterOpts{}) failures := prometheus.NewCounter(prometheus.CounterOpts{}) - pa := NewPusherAppendable(pusher, "user-1", ruleLimits{evalDelay: 10 * time.Second}, writes, failures) + pa := NewPusherAppendable(pusher, "user-1", writes, failures) lbls, err := parser.ParseMetric("foo_bar") require.NoError(t, err) diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go index 694dc143b4a32..99839ed652536 100644 --- a/pkg/ruler/base/ruler_test.go +++ b/pkg/ruler/base/ruler_test.go @@ -84,17 +84,12 @@ func defaultRulerConfig(t testing.TB, store rulestore.RuleStore) Config { } type ruleLimits struct { - evalDelay time.Duration tenantShard int maxRulesPerRuleGroup int maxRuleGroups int alertManagerConfig map[string]*config.AlertManagerConfig } -func (r ruleLimits) EvaluationDelay(_ string) time.Duration { - return r.evalDelay -} - func (r ruleLimits) RulerTenantShardSize(_ string) int { return r.tenantShard } @@ -144,12 +139,12 @@ func testSetup(t *testing.T, q storage.Querier) (*promql.Engine, storage.Queryab reg := prometheus.NewRegistry() queryable := testQueryableFunc(q) - return engine, queryable, pusher, l, ruleLimits{evalDelay: 0, maxRuleGroups: 20, maxRulesPerRuleGroup: 15}, reg + return engine, queryable, pusher, l, ruleLimits{maxRuleGroups: 20, maxRulesPerRuleGroup: 15}, reg } func newManager(t *testing.T, cfg Config, q storage.Querier) *DefaultMultiTenantManager { engine, queryable, pusher, logger, overrides, reg := testSetup(t, q) - manager, err := NewDefaultMultiTenantManager(cfg, DefaultTenantManagerFactory(cfg, pusher, queryable, engine, overrides, nil, constants.Loki), reg, logger, overrides, constants.Loki) + manager, err := NewDefaultMultiTenantManager(cfg, DefaultTenantManagerFactory(cfg, pusher, queryable, engine, nil, constants.Loki), reg, logger, overrides, constants.Loki) require.NoError(t, err) return manager @@ -158,10 +153,10 @@ func newManager(t *testing.T, cfg Config, q storage.Querier) *DefaultMultiTenant func newMultiTenantManager(t *testing.T, cfg Config, q storage.Querier, amConf map[string]*config.AlertManagerConfig) *DefaultMultiTenantManager { engine, queryable, pusher, logger, _, reg := testSetup(t, q) - overrides := ruleLimits{evalDelay: 0, maxRuleGroups: 20, maxRulesPerRuleGroup: 15} + overrides := ruleLimits{maxRuleGroups: 20, maxRulesPerRuleGroup: 15} overrides.alertManagerConfig = amConf - manager, err := NewDefaultMultiTenantManager(cfg, DefaultTenantManagerFactory(cfg, pusher, queryable, engine, overrides, nil, constants.Loki), reg, logger, overrides, constants.Loki) + manager, err := NewDefaultMultiTenantManager(cfg, DefaultTenantManagerFactory(cfg, pusher, queryable, engine, nil, constants.Loki), reg, logger, overrides, constants.Loki) require.NoError(t, err) return manager @@ -213,7 +208,7 @@ func buildRuler(t *testing.T, rulerConfig Config, q storage.Querier, clientMetri storage, err := NewLegacyRuleStore(rulerConfig.StoreConfig, hedging.Config{}, clientMetrics, promRules.FileLoader{}, log.NewNopLogger()) require.NoError(t, err) - managerFactory := DefaultTenantManagerFactory(rulerConfig, pusher, queryable, engine, overrides, reg, constants.Loki) + managerFactory := DefaultTenantManagerFactory(rulerConfig, pusher, queryable, engine, reg, constants.Loki) manager, err := NewDefaultMultiTenantManager(rulerConfig, 
managerFactory, reg, log.NewNopLogger(), overrides, constants.Loki) require.NoError(t, err) @@ -476,7 +471,7 @@ func TestGetRules(t *testing.T) { m := loki_storage.NewClientMetrics() defer m.Unregister() r := buildRuler(t, cfg, nil, m, rulerAddrMap) - r.limits = ruleLimits{evalDelay: 0, tenantShard: tc.shuffleShardSize} + r.limits = ruleLimits{tenantShard: tc.shuffleShardSize} rulerAddrMap[id] = r if r.ring != nil { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.ring)) @@ -1420,7 +1415,7 @@ func TestSharding(t *testing.T) { m := loki_storage.NewClientMetrics() defer m.Unregister() r := buildRuler(t, cfg, nil, m, nil) - r.limits = ruleLimits{evalDelay: 0, tenantShard: tc.shuffleShardSize} + r.limits = ruleLimits{tenantShard: tc.shuffleShardSize} if forceRing != nil { r.ring = forceRing diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c6d8d62dd86f5..db6316e9986d0 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -58,7 +58,7 @@ type RulesLimits interface { // queryFunc returns a new query function using the rules.EngineQueryFunc function // and passing an altered timestamp. -func queryFunc(evaluator Evaluator, overrides RulesLimits, checker readyChecker, userID string, logger log.Logger) rules.QueryFunc { +func queryFunc(evaluator Evaluator, checker readyChecker, userID string, logger log.Logger) rules.QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { hash := logql.HashedQuery(qs) detail := rules.FromOriginContext(ctx) @@ -72,8 +72,7 @@ func queryFunc(evaluator Evaluator, overrides RulesLimits, checker readyChecker, return nil, errNotReady } - adjusted := t.Add(-overrides.EvaluationDelay(userID)) - res, err := evaluator.Eval(ctx, qs, adjusted) + res, err := evaluator.Eval(ctx, qs, t) if err != nil { level.Error(detailLog).Log("msg", "rule evaluation failed", "err", err) @@ -145,7 +144,7 @@ func MultiTenantRuleManager(cfg Config, evaluator Evaluator, overrides RulesLimi registry.configureTenantStorage(userID) logger = log.With(logger, "user", userID) - queryFn := queryFunc(evaluator, overrides, registry, userID, logger) + queryFn := queryFunc(evaluator, registry, userID, logger) memStore := NewMemStore(userID, queryFn, newMemstoreMetrics(reg), 5*time.Minute, log.With(logger, "subcomponent", "MemStore")) // GroupLoader builds a cache of the rules as they're loaded by the diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index ef109d4d432e3..55e77c2f18a2a 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -109,7 +109,7 @@ func TestNonMetricQuery(t *testing.T) { eval, err := NewLocalEvaluator(engine, log) require.NoError(t, err) - queryFunc := queryFunc(eval, overrides, fakeChecker{}, "fake", log) + queryFunc := queryFunc(eval, fakeChecker{}, "fake", log) _, err = queryFunc(context.TODO(), `{job="nginx"}`, time.Now()) require.Error(t, err, "rule result is not a vector or scalar") diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go index 8fac602638110..12dbfb8d9a7df 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go @@ -205,7 +205,7 @@ func newTestStore(t testing.TB, clientMetrics storage.ClientMetrics) *testStore t.Helper() servercfg := &ww.Config{} require.Nil(t, servercfg.LogLevel.Set("debug")) - util_log.InitLogger(servercfg, nil, true, false) + 
util_log.InitLogger(servercfg, nil, false) workdir := t.TempDir() filepath.Join(workdir, "index") indexDir := filepath.Join(workdir, "index") diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index 97377ef0275c4..7453b615118a0 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -31,8 +31,8 @@ var ( ) // InitLogger initialises the global gokit logger (util_log.Logger) and returns that logger. -func InitLogger(cfg *server.Config, reg prometheus.Registerer, buffered bool, sync bool) log.Logger { - logger := newPrometheusLogger(cfg.LogLevel, cfg.LogFormat, reg, buffered, sync) +func InitLogger(cfg *server.Config, reg prometheus.Registerer, sync bool) log.Logger { + logger := newPrometheusLogger(cfg.LogLevel, cfg.LogFormat, reg, sync) // when using util_log.Logger, skip 3 stack frames. Logger = log.With(logger, "caller", log.Caller(3)) @@ -113,7 +113,7 @@ func LevelHandler(currentLogLevel *dslog.Level) http.HandlerFunc { // newPrometheusLogger creates a new instance of PrometheusLogger which exposes // Prometheus counters for various log levels. -func newPrometheusLogger(l dslog.Level, format string, reg prometheus.Registerer, buffered bool, sync bool) log.Logger { +func newPrometheusLogger(l dslog.Level, format string, reg prometheus.Registerer, sync bool) log.Logger { // buffered logger settings var ( logEntries uint32 = 256 // buffer up to 256 log lines in memory before flushing to a write(2) syscall @@ -138,22 +138,16 @@ func newPrometheusLogger(l dslog.Level, format string, reg prometheus.Registerer Buckets: prometheus.ExponentialBuckets(1, 2, int(math.Log2(float64(logEntries)))+1), }) - var writer io.Writer - if buffered { - // retain a reference to this logger because it doesn't conform to the standard Logger interface, - // and we can't unwrap it to get the underlying logger when we flush on shutdown - bufferedLogger = dslog.NewBufferedLogger(os.Stderr, logEntries, - dslog.WithFlushPeriod(flushTimeout), - dslog.WithPrellocatedBuffer(logBufferSize), - dslog.WithFlushCallback(func(entries uint32) { - logFlushes.Observe(float64(entries)) - }), - ) - - writer = bufferedLogger - } else { - writer = os.Stderr - } + // retain a reference to this logger because it doesn't conform to the standard Logger interface, + // and we can't unwrap it to get the underlying logger when we flush on shutdown + bufferedLogger = dslog.NewBufferedLogger(os.Stderr, logEntries, + dslog.WithFlushPeriod(flushTimeout), + dslog.WithPrellocatedBuffer(logBufferSize), + dslog.WithFlushCallback(func(entries uint32) { + logFlushes.Observe(float64(entries)) + }), + ) + var writer io.Writer = bufferedLogger if sync { writer = log.NewSyncWriter(writer) diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 0600b9ebb5ffc..c7cc4395d8f8a 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -72,7 +72,6 @@ type Limits struct { RejectOldSamples bool `yaml:"reject_old_samples" json:"reject_old_samples"` RejectOldSamplesMaxAge model.Duration `yaml:"reject_old_samples_max_age" json:"reject_old_samples_max_age"` CreationGracePeriod model.Duration `yaml:"creation_grace_period" json:"creation_grace_period"` - EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"` MaxLineSize flagext.ByteSize `yaml:"max_line_size" json:"max_line_size"` MaxLineSizeTruncate bool `yaml:"max_line_size_truncate" json:"max_line_size_truncate"` IncrementDuplicateTimestamp bool `yaml:"increment_duplicate_timestamp" json:"increment_duplicate_timestamp"` @@ -112,9 +111,6 @@ type Limits 
struct { VolumeMaxSeries int `yaml:"volume_max_series" json:"volume_max_series" doc:"description=The maximum number of aggregated series in a log-volume response"` // Ruler defaults and limits. - - // TODO(chaudum): Remove deprecated setting in next major version - RulerEvaluationDelay model.Duration `yaml:"ruler_evaluation_delay_duration" json:"ruler_evaluation_delay_duration"` RulerMaxRulesPerRuleGroup int `yaml:"ruler_max_rules_per_rule_group" json:"ruler_max_rules_per_rule_group"` RulerMaxRuleGroupsPerTenant int `yaml:"ruler_max_rule_groups_per_tenant" json:"ruler_max_rule_groups_per_tenant"` RulerAlertManagerConfig *ruler_config.AlertManagerConfig `yaml:"ruler_alertmanager_config" json:"ruler_alertmanager_config" doc:"hidden"` @@ -222,7 +218,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.Var(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", "Maximum accepted sample age before rejecting.") _ = l.CreationGracePeriod.Set("10m") f.Var(&l.CreationGracePeriod, "validation.create-grace-period", "Duration which table will be created/deleted before/after it's needed; we won't accept sample from before this time.") - f.BoolVar(&l.EnforceMetricName, "validation.enforce-metric-name", true, "Enforce every sample has a metric name.") f.IntVar(&l.MaxEntriesLimitPerQuery, "validation.max-entries-limit", 5000, "Maximum number of log entries that will be returned for a query.") f.IntVar(&l.MaxLocalStreamsPerUser, "ingester.max-streams-per-user", 0, "Maximum number of active streams per user, per ingester. 0 to disable.") @@ -273,9 +268,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") f.IntVar(&l.QueryReadyIndexNumDays, "store.query-ready-index-num-days", 0, "Number of days of index to be kept always downloaded for queries. Applies only to per user index in boltdb-shipper index store. 0 to disable.") - _ = l.RulerEvaluationDelay.Set("0s") - f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Deprecated. Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") - f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.") f.IntVar(&l.RulerMaxRuleGroupsPerTenant, "ruler.max-rule-groups-per-tenant", 0, "Maximum number of rule groups per-tenant. 0 to disable.") f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when shuffle-sharding is enabled in the ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") @@ -514,11 +506,6 @@ func (o *Overrides) MaxQueryParallelism(_ context.Context, userID string) int { return o.getOverridesForUser(userID).MaxQueryParallelism } -// EnforceMetricName whether to enforce the presence of a metric name. 
-func (o *Overrides) EnforceMetricName(userID string) bool { - return o.getOverridesForUser(userID).EnforceMetricName -} - // CardinalityLimit whether to enforce the presence of a metric name. func (o *Overrides) CardinalityLimit(userID string) int { return o.getOverridesForUser(userID).CardinalityLimit @@ -586,12 +573,6 @@ func (o *Overrides) MaxQueryLookback(_ context.Context, userID string) time.Dura return time.Duration(o.getOverridesForUser(userID).MaxQueryLookback) } -// Deprecated, can be removed in next major version -// EvaluationDelay returns the rules evaluation delay for a given user. -func (o *Overrides) EvaluationDelay(userID string) time.Duration { - return time.Duration(o.getOverridesForUser(userID).RulerEvaluationDelay) -} - // RulerTenantShardSize returns shard size (number of rulers) used by this tenant when using shuffle-sharding strategy. func (o *Overrides) RulerTenantShardSize(userID string) int { return o.getOverridesForUser(userID).RulerTenantShardSize diff --git a/tools/deprecated-config-checker/checker/checker_test.go b/tools/deprecated-config-checker/checker/checker_test.go index e02315821d4bb..efecefb1700f0 100644 --- a/tools/deprecated-config-checker/checker/checker_test.go +++ b/tools/deprecated-config-checker/checker/checker_test.go @@ -31,13 +31,15 @@ var ( "storage_config.boltdb_shipper.shared_store_key_prefix", "storage_config.tsdb_shipper.shared_store", "storage_config.tsdb_shipper.shared_store_key_prefix", + "compactor.deletion_mode", "compactor.shared_store", "compactor.shared_store_key_prefix", + "limits_config.enforce_metric_name", + "limits_config.ruler_evaluation_delay_duration", } expectedConfigDeprecates = []string{ "ruler.remote_write.client", - "compactor.deletion_mode", "index_gateway.ring.replication_factor", "storage_config.bigtable", "storage_config.cassandra", @@ -46,7 +48,6 @@ var ( "storage_config.aws.dynamodb", "chunk_store_config.write_dedupe_cache_config", "limits_config.unordered_writes", - "limits_config.ruler_evaluation_delay_duration", "limits_config.ruler_remote_write_url", "limits_config.ruler_remote_write_timeout", "limits_config.ruler_remote_write_headers", @@ -81,11 +82,15 @@ var ( "schema_config.configs.[8].object_store", } - expectedRuntimeConfigDeletes = []string{} + expectedRuntimeConfigDeletes = []string{ + "overrides.foo.ruler_evaluation_delay_duration", + "overrides.foo.enforce_metric_name", + "overrides.bar.ruler_evaluation_delay_duration", + "overrides.bar.enforce_metric_name", + } expectedRuntimeConfigDeprecates = []string{ "overrides.foo.unordered_writes", - "overrides.foo.ruler_evaluation_delay_duration", "overrides.foo.ruler_remote_write_url", "overrides.foo.ruler_remote_write_timeout", "overrides.foo.ruler_remote_write_headers", @@ -103,7 +108,6 @@ var ( "overrides.foo.per_tenant_override_period", "overrides.foo.allow_deletes", "overrides.bar.unordered_writes", - "overrides.bar.ruler_evaluation_delay_duration", "overrides.bar.ruler_remote_write_url", "overrides.bar.ruler_remote_write_timeout", "overrides.bar.ruler_remote_write_headers", diff --git a/tools/deprecated-config-checker/deleted-config.yaml b/tools/deprecated-config-checker/deleted-config.yaml index 322e861d70eed..148663283e862 100644 --- a/tools/deprecated-config-checker/deleted-config.yaml +++ b/tools/deprecated-config-checker/deleted-config.yaml @@ -38,8 +38,13 @@ storage_config: s3: *s3_deletes compactor: + deletion_mode: "Use global or per-tenant deletion_mode configuration from limits_config." 
shared_store: "Compactor will now operate on all the object stores configured in period config where the index type is either tsdb or boltdb-shipper. -compactor.delete-request-store to configure the store for delete requests is now a required field." shared_store_key_prefix: "Use -compactor.delete-request-store.key-prefix to configure the path prefix under which the delete requests are stored." chunk_store_config: max_look_back_period: "Use global or per-tenant max_query_lookback configuration from limits_config." + +limits_config: + ruler_evaluation_delay_duration: "This setting is removed." + enforce_metric_name: "This setting is removed." diff --git a/tools/deprecated-config-checker/deprecated-config.yaml b/tools/deprecated-config-checker/deprecated-config.yaml index 0cd8e8fd8c818..ab4c3c073d738 100644 --- a/tools/deprecated-config-checker/deprecated-config.yaml +++ b/tools/deprecated-config-checker/deprecated-config.yaml @@ -18,9 +18,6 @@ ruler: remote_write: client: "Use clients instead." -compactor: - deletion_mode: "Use global or per-tenant deletion_mode configuration from limits_config." - index_gateway: ring: replication_factor: "Use global or per-tenant index_gateway_shard_size configuration from limits_config." @@ -48,7 +45,6 @@ chunk_store_config: ## NOTE: This will also be used to validate per-tenant overrides. limits_config: unordered_writes: "Will be eventually removed." - ruler_evaluation_delay_duration: "Will be eventually removed." ruler_remote_write_url: "Use ruler_remote_write_config instead." ruler_remote_write_timeout: "Use ruler_remote_write_config instead." ruler_remote_write_headers: "Use ruler_remote_write_config instead." diff --git a/tools/deprecated-config-checker/test-fixtures/config.yaml b/tools/deprecated-config-checker/test-fixtures/config.yaml index 80d864188b0ff..d5a326c8647f3 100644 --- a/tools/deprecated-config-checker/test-fixtures/config.yaml +++ b/tools/deprecated-config-checker/test-fixtures/config.yaml @@ -43,7 +43,7 @@ compactor: working_directory: /tmp/loki/boltdb-shipper-active shared_store: gcs # DELETED shared_store_key_prefix: /index # DELETED - deletion_mode: "delete" # DEPRECATED + deletion_mode: "delete" # DELETED chunk_store_config: cache_lookups_older_than: 1h @@ -117,7 +117,8 @@ schema_config: limits_config: ingestion_rate_mb: 100 unordered_writes: true # DEPRECATED - ruler_evaluation_delay_duration: 1m # DEPRECATED + enforce_metric_name: true # DELETED + ruler_evaluation_delay_duration: 1m # DELETED ruler_remote_write_url: "push.123abc.net" # DEPRECATED ruler_remote_write_timeout: 1m # DEPRECATED ruler_remote_write_headers: ["foo", "bar"] # DEPRECATED diff --git a/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml b/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml index 9500ce719724a..c86dab51f26df 100644 --- a/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml +++ b/tools/deprecated-config-checker/test-fixtures/runtime-config.yaml @@ -2,7 +2,8 @@ overrides: "foo": &tenant_overrides ingestion_rate_mb: 100 unordered_writes: true # DEPRECATED - ruler_evaluation_delay_duration: 1m # DEPRECATED + enforce_metric_name: true # DELETED + ruler_evaluation_delay_duration: 1m # DELETED ruler_remote_write_url: "push.123abc.net" # DEPRECATED ruler_remote_write_timeout: 1m # DEPRECATED ruler_remote_write_headers: [ "foo", "bar" ] # DEPRECATED diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go index 0f2f083f08068..66a7a72321bb3 100644 --- 
a/tools/doc-generator/parse/root_blocks.go +++ b/tools/doc-generator/parse/root_blocks.go @@ -122,7 +122,7 @@ var ( { Name: "compactor", StructType: []reflect.Type{reflect.TypeOf(compactor.Config{})}, - Desc: "The compactor block configures the compactor component, which compacts index shards for performance. `-boltdb.shipper.compactor.` prefix is deprecated, please use `-compactor.` instead.", + Desc: "The compactor block configures the compactor component, which compacts index shards for performance.", }, { Name: "bloom_compactor", diff --git a/tools/tsdb/helpers/setup.go b/tools/tsdb/helpers/setup.go index 95d599e704971..60a6121d616bb 100644 --- a/tools/tsdb/helpers/setup.go +++ b/tools/tsdb/helpers/setup.go @@ -45,7 +45,7 @@ func Setup() (loki.Config, services.Service, string, error) { } c.Config.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadOnly - util_log.InitLogger(&c.Server, prometheus.DefaultRegisterer, c.UseBufferedLogger, c.UseSyncLogger) + util_log.InitLogger(&c.Server, prometheus.DefaultRegisterer, false) c.Config.StorageConfig.TSDBShipperConfig.ActiveIndexDirectory = filepath.Join(dir, "tsdb-active") c.Config.StorageConfig.TSDBShipperConfig.CacheLocation = filepath.Join(dir, "tsdb-cache") diff --git a/tools/tsdb/migrate-versions/main.go b/tools/tsdb/migrate-versions/main.go index 4339916c4a676..b458c80d4c1b8 100644 --- a/tools/tsdb/migrate-versions/main.go +++ b/tools/tsdb/migrate-versions/main.go @@ -299,7 +299,7 @@ func setup() loki.Config { } serverCfg := &c.Server - serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, c.UseBufferedLogger, c.UseSyncLogger) + serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, false) if err := c.Validate(); err != nil { level.Error(util_log.Logger).Log("msg", "validating config", "err", err.Error())
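The compactor changes above collapse the duplicate `boltdb.shipper.compactor.*` registrations into a single `compactor.*` flag set, which is why the integration tests switch prefixes. The sketch below illustrates the effect on flag parsing; it is not part of the patch, and it assumes the `github.com/grafana/loki/pkg/compactor` import path shown in the hunk headers and that registering the compactor flags on a fresh FlagSet has no side effects beyond defining them.

```go
package main

import (
	"flag"
	"fmt"
	"io"

	"github.com/grafana/loki/pkg/compactor" // path taken from the hunk headers above
)

func main() {
	fs := flag.NewFlagSet("loki", flag.ContinueOnError)
	fs.SetOutput(io.Discard) // keep the demo quiet; errors are inspected below

	var cfg compactor.Config
	cfg.RegisterFlags(fs)

	// The single remaining registration accepts the -compactor.* form...
	fmt.Println(fs.Parse([]string{"-compactor.compaction-interval=1s"})) // <nil>

	// ...while the old -boltdb.shipper.compactor.* aliases are no longer defined.
	fmt.Println(fs.Parse([]string{"-boltdb.shipper.compactor.compaction-interval=1s"}))
	// flag provided but not defined: -boltdb.shipper.compactor.compaction-interval
}
```

For operators, migrating is a prefix rename; the individual flag names and defaults are unchanged by this patch. The one exception visible here is `deletion-mode`, which is no longer part of the compactor block and is instead configured via `limits_config` (see the upgrade note above).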