From 32e9ee8209b6096fe07f19d888c2d64ba2a49462 Mon Sep 17 00:00:00 2001 From: Ashwanth Date: Tue, 14 Nov 2023 18:51:42 +0530 Subject: [PATCH 01/48] chore: follow-up to #11151 and #11025 (#11225) **What this PR does / why we need it**: - remove usage of `enforce_metric_name` in ksonnet - https://github.com/grafana/loki/pull/11151 - make `cortex_ruler_client_request_duration_seconds` prefix configurable - updates upgrade guide to remove reference to metrics that loki does not export **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. 
[Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --- docs/sources/setup/install/helm/reference.md | 1 - docs/sources/setup/upgrade/_index.md | 4 ---- pkg/ruler/base/client_pool.go | 20 ++++++++++--------- pkg/ruler/base/client_pool_test.go | 6 ++++-- production/docker/config/loki.yaml | 1 - production/helm/loki/values.yaml | 1 - .../loki-simple-scalable/example/main.jsonnet | 1 - production/ksonnet/loki/config.libsonnet | 1 - production/nomad/loki-distributed/config.yml | 1 - production/nomad/loki-simple/config.yml | 1 - production/nomad/loki/config.yml | 1 - 11 files changed, 15 insertions(+), 23 deletions(-) diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 2155ae9afc66f..833cc2c77edc8 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -2027,7 +2027,6 @@ null Limits config
 {
-  "enforce_metric_name": false,
   "max_cache_freshness_per_query": "10m",
   "reject_old_samples": true,
   "reject_old_samples_max_age": "168h",
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md
index e76a3d1b191df..e9483e5219409 100644
--- a/docs/sources/setup/upgrade/_index.md
+++ b/docs/sources/setup/upgrade/_index.md
@@ -241,10 +241,6 @@ Some Loki metrics started with the prefix `cortex_`. In this release they will b
  - `cortex_query_scheduler_queue_duration_seconds_sum`
  - `cortex_query_scheduler_queue_length`
  - `cortex_query_scheduler_running`
- - `cortex_quota_cgroup_cpu_max`
- - `cortex_quota_cgroup_cpu_period`
- - `cortex_quota_cpu_count`
- - `cortex_quota_gomaxprocs`
  - `cortex_ring_member_heartbeats_total`
  - `cortex_ring_member_tokens_owned`
  - `cortex_ring_member_tokens_to_own`
diff --git a/pkg/ruler/base/client_pool.go b/pkg/ruler/base/client_pool.go
index ca2a3ac2d45f0..4a66fc935107e 100644
--- a/pkg/ruler/base/client_pool.go
+++ b/pkg/ruler/base/client_pool.go
@@ -48,15 +48,16 @@ func newRulerClientPool(clientCfg grpcclient.Config, logger log.Logger, reg prom
 	})
 
 	return &rulerClientsPool{
-		client.NewPool("ruler", poolCfg, nil, newRulerClientFactory(clientCfg, reg), clientsCount, logger),
+		client.NewPool("ruler", poolCfg, nil, newRulerClientFactory(clientCfg, reg, metricsNamespace), clientsCount, logger),
 	}
 }
 
-func newRulerClientFactory(clientCfg grpcclient.Config, reg prometheus.Registerer) client.PoolFactory {
+func newRulerClientFactory(clientCfg grpcclient.Config, reg prometheus.Registerer, metricsNamespace string) client.PoolFactory {
 	requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
-		Name:    "cortex_ruler_client_request_duration_seconds",
-		Help:    "Time spent executing requests to the ruler.",
-		Buckets: prometheus.ExponentialBuckets(0.008, 4, 7),
+		Namespace: metricsNamespace,
+		Name:      "ruler_client_request_duration_seconds",
+		Help:      "Time spent executing requests to the ruler.",
+		Buckets:   prometheus.ExponentialBuckets(0.008, 4, 7),
 	}, []string{"operation", "status_code"})
 
 	return client.PoolAddrFunc(func(addr string) (client.PoolClient, error) {
@@ -64,11 +65,12 @@ func newRulerClientFactory(clientCfg grpcclient.Config, reg prometheus.Registere
 	})
 }
 
-func newRulerPoolClient(clientCfg grpcclient.Config, reg prometheus.Registerer) func(addr string) (client.PoolClient, error) {
+func newRulerPoolClient(clientCfg grpcclient.Config, reg prometheus.Registerer, metricsNamespace string) func(addr string) (client.PoolClient, error) {
 	requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
-		Name:    "cortex_ruler_client_request_duration_seconds",
-		Help:    "Time spent executing requests to the ruler.",
-		Buckets: prometheus.ExponentialBuckets(0.008, 4, 7),
+		Namespace: metricsNamespace,
+		Name:      "ruler_client_request_duration_seconds",
+		Help:      "Time spent executing requests to the ruler.",
+		Buckets:   prometheus.ExponentialBuckets(0.008, 4, 7),
 	}, []string{"operation", "status_code"})
 
 	return func(addr string) (client.PoolClient, error) {
diff --git a/pkg/ruler/base/client_pool_test.go b/pkg/ruler/base/client_pool_test.go
index 3e296cc116c17..05fc23290033c 100644
--- a/pkg/ruler/base/client_pool_test.go
+++ b/pkg/ruler/base/client_pool_test.go
@@ -13,6 +13,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
+
+	"github.com/grafana/loki/pkg/util/constants"
 )
 
 func Test_newRulerClientFactory(t *testing.T) {
@@ -36,7 +38,7 @@ func Test_newRulerClientFactory(t *testing.T) {
 	flagext.DefaultValues(&cfg)
 
 	reg := prometheus.NewPedanticRegistry()
-	factory := newRulerPoolClient(cfg, reg)
+	factory := newRulerPoolClient(cfg, reg, constants.Loki)
 
 	for i := 0; i < 2; i++ {
 		client, err := factory(listener.Addr().String())
@@ -54,7 +56,7 @@ func Test_newRulerClientFactory(t *testing.T) {
 	require.NoError(t, err)
 
 	assert.Len(t, metrics, 1)
-	assert.Equal(t, "cortex_ruler_client_request_duration_seconds", metrics[0].GetName())
+	assert.Equal(t, "loki_ruler_client_request_duration_seconds", metrics[0].GetName())
 	assert.Equal(t, dto.MetricType_HISTOGRAM, metrics[0].GetType())
 	assert.Len(t, metrics[0].GetMetric(), 1)
 	assert.Equal(t, uint64(2), metrics[0].GetMetric()[0].GetHistogram().GetSampleCount())
diff --git a/production/docker/config/loki.yaml b/production/docker/config/loki.yaml
index 7d7346cfc63b7..e6a2f5fe31d84 100644
--- a/production/docker/config/loki.yaml
+++ b/production/docker/config/loki.yaml
@@ -89,7 +89,6 @@ schema_config:
 
 limits_config:
   max_cache_freshness_per_query: '10m'
-  enforce_metric_name: false
   reject_old_samples: true
   reject_old_samples_max_age: 30m
   ingestion_rate_mb: 10
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 92b7069af39f2..de6048aecc712 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -254,7 +254,6 @@ loki:
     grpc_listen_port: 9095
   # -- Limits config
   limits_config:
-    enforce_metric_name: false
     reject_old_samples: true
     reject_old_samples_max_age: 168h
     max_cache_freshness_per_query: 10m
diff --git a/production/ksonnet/loki-simple-scalable/example/main.jsonnet b/production/ksonnet/loki-simple-scalable/example/main.jsonnet
index ebf7b5cf288a8..66a0d185f44d0 100644
--- a/production/ksonnet/loki-simple-scalable/example/main.jsonnet
+++ b/production/ksonnet/loki-simple-scalable/example/main.jsonnet
@@ -38,7 +38,6 @@ loki {
         },
       },
       limits_config: {
-        enforce_metric_name: false,
         reject_old_samples_max_age: '168h',  //1 week
         max_global_streams_per_user: 60000,
         ingestion_rate_mb: 75,
diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet
index 8450e524fd1ee..20cd6ad1fe419 100644
--- a/production/ksonnet/loki/config.libsonnet
+++ b/production/ksonnet/loki/config.libsonnet
@@ -208,7 +208,6 @@
         query_ingesters_within: '2h',  // twice the max-chunk age (1h default) for safety buffer
       },
       limits_config: {
-        enforce_metric_name: false,
         // align middleware parallelism with shard factor to optimize one-legged sharded queries.
         max_query_parallelism: if $._config.queryFrontend.sharded_queries_enabled then
           // For a sharding factor of 16 (default), this is 256, or enough for 16 sharded queries.
diff --git a/production/nomad/loki-distributed/config.yml b/production/nomad/loki-distributed/config.yml
index 2391ff1afed0b..48fc8e166c688 100644
--- a/production/nomad/loki-distributed/config.yml
+++ b/production/nomad/loki-distributed/config.yml
@@ -122,6 +122,5 @@ ruler:
     dir: {{ env "NOMAD_ALLOC_DIR" }}/data/ruler
 
 limits_config:
-  enforce_metric_name: false
   reject_old_samples: true
   reject_old_samples_max_age: 168h
diff --git a/production/nomad/loki-simple/config.yml b/production/nomad/loki-simple/config.yml
index d0883b2dfa6ae..79b1d39d57a92 100644
--- a/production/nomad/loki-simple/config.yml
+++ b/production/nomad/loki-simple/config.yml
@@ -50,7 +50,6 @@ storage_config:
     s3forcepathstyle: true
 
 limits_config:
-  enforce_metric_name: false
   reject_old_samples: true
   reject_old_samples_max_age: 168h
 
diff --git a/production/nomad/loki/config.yml b/production/nomad/loki/config.yml
index 1f1e24701925a..ceeda7d2e49ef 100644
--- a/production/nomad/loki/config.yml
+++ b/production/nomad/loki/config.yml
@@ -50,7 +50,6 @@ storage_config:
     s3forcepathstyle: true
 
 limits_config:
-  enforce_metric_name: false
   reject_old_samples: true
   reject_old_samples_max_age: 168h
 

From 3a7b5d246b0153ac9f36a0848abdd9401dd68a37 Mon Sep 17 00:00:00 2001
From: Karsten Jeschkies 
Date: Tue, 14 Nov 2023 16:17:51 +0100
Subject: [PATCH 02/48] Serialize query AST to JSON. (#11123)

**What this PR does / why we need it**:
This introduces the visitor pattern to serialize the LogQL AST to JSON.
We've chosen this pattern because it will be more flexible once the AST is
encoded into Protobuf and an actual query plan.

**Checklist**
- [ ] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [ ] Documentation added
- [x] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option,
update the `deprecated-config.yaml` and `deleted-config.yaml` files
respectively in the `tools/deprecated-config-checker` directory.
[Example
PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
---
 pkg/logql/log/ip.go                |   30 +-
 pkg/logql/log/label_filter.go      |   44 +-
 pkg/logql/syntax/ast.go            |  110 +-
 pkg/logql/syntax/parser_test.go    | 4792 ++++++++++++++--------------
 pkg/logql/syntax/serialize.go      |  878 +++++
 pkg/logql/syntax/serialize_test.go |   93 +
 pkg/logql/syntax/walk.go           |  119 +
 7 files changed, 3632 insertions(+), 2434 deletions(-)
 create mode 100644 pkg/logql/syntax/serialize.go
 create mode 100644 pkg/logql/syntax/serialize_test.go

diff --git a/pkg/logql/log/ip.go b/pkg/logql/log/ip.go
index cd803e820c10c..1508432d245c5 100644
--- a/pkg/logql/log/ip.go
+++ b/pkg/logql/log/ip.go
@@ -78,39 +78,41 @@ func (f *IPLineFilter) filterTy(line []byte, ty labels.MatchType) bool {
 
 type IPLabelFilter struct {
 	ip *ipFilter
-	ty LabelFilterType
+	Ty LabelFilterType
 
-	// if used as label matcher, this holds the identifier label name.
+	// if used as Label matcher, this holds the identifier Label name.
 	// e.g: (|remote_addr = ip("xxx")). Here labelName is `remote_addr`
-	label string
+	Label string
 
 	// patError records if given pattern is invalid.
 	patError error
 
-	// local copy of pattern to display it in errors, even though pattern matcher fails because of invalid pattern.
-	pattern string
+	// local copy of Pattern to display it in errors, even though Pattern matcher fails because of invalid Pattern.
+	Pattern string
 }
 
 // NewIPLabelFilter is used to construct ip filter as label filter for the given `label`.
-func NewIPLabelFilter(pattern string, label string, ty LabelFilterType) *IPLabelFilter {
+func NewIPLabelFilter(pattern, label string, ty LabelFilterType) *IPLabelFilter {
 	ip, err := newIPFilter(pattern)
 	return &IPLabelFilter{
 		ip:       ip,
-		label:    label,
-		ty:       ty,
+		Label:    label,
+		Ty:       ty,
 		patError: err,
-		pattern:  pattern,
+		Pattern:  pattern,
 	}
 }
 
 // `Process` implements `Stage` interface
 func (f *IPLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) {
-	return line, f.filterTy(line, f.ty, lbs)
+	return line, f.filterTy(line, f.Ty, lbs)
 }
 
+func (f *IPLabelFilter) isLabelFilterer() {}
+
 // `RequiredLabelNames` implements `Stage` interface
 func (f *IPLabelFilter) RequiredLabelNames() []string {
-	return []string{f.label}
+	return []string{f.Label}
 }
 
 // PatternError will be used `labelFilter.Stage()` method so that, if the given pattern is wrong
@@ -124,7 +126,7 @@ func (f *IPLabelFilter) filterTy(_ []byte, ty LabelFilterType, lbs *LabelsBuilde
 		// why `true`?. if there's an error only the string matchers can filter out.
 		return true
 	}
-	input, ok := lbs.Get(f.label)
+	input, ok := lbs.Get(f.Label)
 	if !ok {
 		// we have not found the label.
 		return false
@@ -146,11 +148,11 @@ func (f *IPLabelFilter) filterTy(_ []byte, ty LabelFilterType, lbs *LabelsBuilde
 // `String` implements fmt.Stringer inteface, by which also implements `LabelFilterer` inteface.
 func (f *IPLabelFilter) String() string {
 	eq := "=" // LabelFilterEqual -> "==", we don't want in string representation of ip label filter.
-	if f.ty == LabelFilterNotEqual {
+	if f.Ty == LabelFilterNotEqual {
 		eq = LabelFilterNotEqual.String()
 	}
 
-	return fmt.Sprintf("%s%sip(%q)", f.label, eq, f.pattern) // label filter
+	return fmt.Sprintf("%s%sip(%q)", f.Label, eq, f.Pattern) // label filter
 }
 
 // ipFilter search for IP addresses of given `pattern` in the given `line`.
diff --git a/pkg/logql/log/label_filter.go b/pkg/logql/log/label_filter.go
index a056d8e16ba00..e3bb1a4bcd5b8 100644
--- a/pkg/logql/log/label_filter.go
+++ b/pkg/logql/log/label_filter.go
@@ -54,15 +54,20 @@ func (f LabelFilterType) String() string {
 }
 
 // LabelFilterer can filter extracted labels.
+//
+//sumtype:decl
 type LabelFilterer interface {
 	Stage
 	fmt.Stringer
+
+	// Seal trait
+	isLabelFilterer()
 }
 
 type BinaryLabelFilter struct {
 	Left  LabelFilterer
 	Right LabelFilterer
-	and   bool
+	And   bool
 }
 
 // NewAndLabelFilter creates a new LabelFilterer from a and binary operation of two LabelFilterer.
@@ -70,7 +75,7 @@ func NewAndLabelFilter(left LabelFilterer, right LabelFilterer) *BinaryLabelFilt
 	return &BinaryLabelFilter{
 		Left:  left,
 		Right: right,
-		and:   true,
+		And:   true,
 	}
 }
 
@@ -84,16 +89,18 @@ func NewOrLabelFilter(left LabelFilterer, right LabelFilterer) *BinaryLabelFilte
 
 func (b *BinaryLabelFilter) Process(ts int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) {
 	line, lok := b.Left.Process(ts, line, lbs)
-	if !b.and && lok {
+	if !b.And && lok {
 		return line, true
 	}
 	line, rok := b.Right.Process(ts, line, lbs)
-	if !b.and {
+	if !b.And {
 		return line, lok || rok
 	}
 	return line, lok && rok
 }
 
+func (b *BinaryLabelFilter) isLabelFilterer() {}
+
 func (b *BinaryLabelFilter) RequiredLabelNames() []string {
 	var names []string
 	names = append(names, b.Left.RequiredLabelNames()...)
@@ -105,7 +112,7 @@ func (b *BinaryLabelFilter) String() string {
 	var sb strings.Builder
 	sb.WriteString("( ")
 	sb.WriteString(b.Left.String())
-	if b.and {
+	if b.And {
 		sb.WriteString(" , ")
 	} else {
 		sb.WriteString(" or ")
@@ -122,6 +129,9 @@ type NoopLabelFilter struct {
 func (NoopLabelFilter) Process(_ int64, line []byte, _ *LabelsBuilder) ([]byte, bool) {
 	return line, true
 }
+
+func (NoopLabelFilter) isLabelFilterer() {}
+
 func (NoopLabelFilter) RequiredLabelNames() []string { return []string{} }
 
 func (f NoopLabelFilter) String() string {
@@ -197,6 +207,8 @@ func (d *BytesLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]
 	}
 }
 
+func (d *BytesLabelFilter) isLabelFilterer() {}
+
 func (d *BytesLabelFilter) RequiredLabelNames() []string {
 	return []string{d.Name}
 }
@@ -207,7 +219,7 @@ func (d *BytesLabelFilter) String() string {
 			return -1
 		}
 		return r
-	}, humanize.Bytes(d.Value))
+	}, humanize.Bytes(d.Value)) // TODO: discuss whether this should just be bytes, B, to be more accurate.
 	return fmt.Sprintf("%s%s%s", d.Name, d.Type, b)
 }
 
@@ -262,6 +274,8 @@ func (d *DurationLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder)
 	}
 }
 
+func (d *DurationLabelFilter) isLabelFilterer() {}
+
 func (d *DurationLabelFilter) RequiredLabelNames() []string {
 	return []string{d.Name}
 }
@@ -323,6 +337,8 @@ func (n *NumericLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) (
 
 }
 
+func (n *NumericLabelFilter) isLabelFilterer() {}
+
 func (n *NumericLabelFilter) RequiredLabelNames() []string {
 	return []string{n.Name}
 }
@@ -348,7 +364,7 @@ func NewStringLabelFilter(m *labels.Matcher) LabelFilterer {
 		return &NoopLabelFilter{m}
 	}
 
-	return &lineFilterLabelFilter{
+	return &LineFilterLabelFilter{
 		Matcher: m,
 		filter:  f,
 	}
@@ -358,18 +374,20 @@ func (s *StringLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([
 	return line, s.Matches(labelValue(s.Name, lbs))
 }
 
+func (s *StringLabelFilter) isLabelFilterer() {}
+
 func (s *StringLabelFilter) RequiredLabelNames() []string {
 	return []string{s.Name}
 }
 
-// lineFilterLabelFilter filters the desired label using an optimized line filter
-type lineFilterLabelFilter struct {
+// LineFilterLabelFilter filters the desired label using an optimized line filter
+type LineFilterLabelFilter struct {
 	*labels.Matcher
 	filter Filterer
 }
 
 // overrides the matcher.String() function in case there is a regexpFilter
-func (s *lineFilterLabelFilter) String() string {
+func (s *LineFilterLabelFilter) String() string {
 	if unwrappedFilter, ok := s.filter.(regexpFilter); ok {
 		rStr := unwrappedFilter.String()
 		str := fmt.Sprintf("%s%s`%s`", s.Matcher.Name, s.Matcher.Type, rStr)
@@ -378,12 +396,14 @@ func (s *lineFilterLabelFilter) String() string {
 	return s.Matcher.String()
 }
 
-func (s *lineFilterLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) {
+func (s *LineFilterLabelFilter) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) {
 	v := labelValue(s.Name, lbs)
 	return line, s.filter.Filter(unsafeGetBytes(v))
 }
 
-func (s *lineFilterLabelFilter) RequiredLabelNames() []string {
+func (s *LineFilterLabelFilter) isLabelFilterer() {}
+
+func (s *LineFilterLabelFilter) RequiredLabelNames() []string {
 	return []string{s.Name}
 }
 
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index 4e251022860e5..95009df3a4689 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -29,6 +29,7 @@ type Expr interface {
 	Shardable() bool // A recursive check on the AST to see if it's shardable.
 	Walkable
 	fmt.Stringer
+	AcceptVisitor
 
 	// Pretty prettyfies any LogQL expression at given `level` of the whole LogQL query.
 	Pretty(level int) string
@@ -53,11 +54,15 @@ type implicit struct{}
 func (implicit) logQLExpr() {}
 
 // LogSelectorExpr is a LogQL expression filtering and returning logs.
+//
+//sumtype:decl
 type LogSelectorExpr interface {
 	Matchers() []*labels.Matcher
-	LogPipelineExpr
+	Pipeline() (Pipeline, error)
 	HasFilter() bool
 	Expr
+
+	isLogSelectorExpr()
 }
 
 // Type alias for backward compatibility
@@ -66,19 +71,17 @@ type (
 	SampleExtractor = log.SampleExtractor
 )
 
-// LogPipelineExpr is an expression defining a log pipeline.
-type LogPipelineExpr interface {
-	Pipeline() (Pipeline, error)
-	Expr
-}
-
 // StageExpr is an expression defining a single step into a log pipeline
+//
+//sumtype:decl
 type StageExpr interface {
 	Stage() (log.Stage, error)
 	Expr
+
+	isStageExpr()
 }
 
-// MultiStageExpr is multiple stages which implement a PipelineExpr.
+// MultiStageExpr is multiple stages which implements a LogSelectorExpr.
 type MultiStageExpr []StageExpr
 
 func (m MultiStageExpr) Pipeline() (log.Pipeline, error) {
@@ -196,6 +199,8 @@ func newMatcherExpr(matchers []*labels.Matcher) *MatchersExpr {
 	return &MatchersExpr{Mts: matchers}
 }
 
+func (e *MatchersExpr) isLogSelectorExpr() {}
+
 func (e *MatchersExpr) Matchers() []*labels.Matcher {
 	return e.Mts
 }
@@ -208,6 +213,8 @@ func (e *MatchersExpr) Shardable() bool { return true }
 
 func (e *MatchersExpr) Walk(f WalkFn) { f(e) }
 
+func (e *MatchersExpr) Accept(v RootVisitor) { v.VisitMatchers(e) }
+
 func (e *MatchersExpr) String() string {
 	var sb strings.Builder
 	sb.WriteString("{")
@@ -242,6 +249,8 @@ func newPipelineExpr(left *MatchersExpr, pipeline MultiStageExpr) LogSelectorExp
 	}
 }
 
+func (e *PipelineExpr) isLogSelectorExpr() {}
+
 func (e *PipelineExpr) Shardable() bool {
 	for _, p := range e.MultiStages {
 		if !p.Shardable() {
@@ -266,6 +275,8 @@ func (e *PipelineExpr) Walk(f WalkFn) {
 	walkAll(f, xs...)
 }
 
+func (e *PipelineExpr) Accept(v RootVisitor) { v.VisitPipeline(e) }
+
 func (e *PipelineExpr) Matchers() []*labels.Matcher {
 	return e.Left.Matchers()
 }
@@ -333,6 +344,8 @@ func newNestedLineFilterExpr(left *LineFilterExpr, right *LineFilterExpr) *LineF
 	}
 }
 
+func (*LineFilterExpr) isStageExpr() {}
+
 func (e *LineFilterExpr) Walk(f WalkFn) {
 	f(e)
 	if e.Left == nil {
@@ -341,6 +354,10 @@ func (e *LineFilterExpr) Walk(f WalkFn) {
 	e.Left.Walk(f)
 }
 
+func (e *LineFilterExpr) Accept(v RootVisitor) {
+	v.VisitLineFilter(e)
+}
+
 // AddFilterExpr adds a filter expression to a logselector expression.
 func AddFilterExpr(expr LogSelectorExpr, ty labels.MatchType, op, match string) (LogSelectorExpr, error) {
 	filter := newLineFilterExpr(ty, op, match)
@@ -471,10 +488,14 @@ func newLogfmtParserExpr(flags []string) *LogfmtParserExpr {
 	return &e
 }
 
+func (*LogfmtParserExpr) isStageExpr() {}
+
 func (e *LogfmtParserExpr) Shardable() bool { return true }
 
 func (e *LogfmtParserExpr) Walk(f WalkFn) { f(e) }
 
+func (e *LogfmtParserExpr) Accept(v RootVisitor) { v.VisitLogfmtParser(e) }
+
 func (e *LogfmtParserExpr) Stage() (log.Stage, error) {
 	return log.NewLogfmtParser(e.Strict, e.KeepEmpty), nil
 }
@@ -524,10 +545,14 @@ func newLabelParserExpr(op, param string) *LabelParserExpr {
 	}
 }
 
+func (*LabelParserExpr) isStageExpr() {}
+
 func (e *LabelParserExpr) Shardable() bool { return true }
 
 func (e *LabelParserExpr) Walk(f WalkFn) { f(e) }
 
+func (e *LabelParserExpr) Accept(v RootVisitor) { v.VisitLabelParser(e) }
+
 func (e *LabelParserExpr) Stage() (log.Stage, error) {
 	switch e.Op {
 	case OpParserTypeJSON:
@@ -569,10 +594,14 @@ func newLabelFilterExpr(filterer log.LabelFilterer) *LabelFilterExpr {
 	}
 }
 
+func (*LabelFilterExpr) isStageExpr() {}
+
 func (e *LabelFilterExpr) Shardable() bool { return true }
 
 func (e *LabelFilterExpr) Walk(f WalkFn) { f(e) }
 
+func (e *LabelFilterExpr) Accept(v RootVisitor) { v.VisitLabelFilter(e) }
+
 func (e *LabelFilterExpr) Stage() (log.Stage, error) {
 	switch ip := e.LabelFilterer.(type) {
 	case *log.IPLabelFilter:
@@ -606,6 +635,8 @@ func newDecolorizeExpr() *DecolorizeExpr {
 	return &DecolorizeExpr{}
 }
 
+func (*DecolorizeExpr) isStageExpr() {}
+
 func (e *DecolorizeExpr) Shardable() bool { return true }
 
 func (e *DecolorizeExpr) Stage() (log.Stage, error) {
@@ -616,6 +647,8 @@ func (e *DecolorizeExpr) String() string {
 }
 func (e *DecolorizeExpr) Walk(f WalkFn) { f(e) }
 
+func (e *DecolorizeExpr) Accept(v RootVisitor) { v.VisitDecolorize(e) }
+
 type DropLabelsExpr struct {
 	dropLabels []log.DropLabel
 	implicit
@@ -625,6 +658,8 @@ func newDropLabelsExpr(dropLabels []log.DropLabel) *DropLabelsExpr {
 	return &DropLabelsExpr{dropLabels: dropLabels}
 }
 
+func (*DropLabelsExpr) isStageExpr() {}
+
 func (e *DropLabelsExpr) Shardable() bool { return true }
 
 func (e *DropLabelsExpr) Stage() (log.Stage, error) {
@@ -654,6 +689,8 @@ func (e *DropLabelsExpr) String() string {
 }
 func (e *DropLabelsExpr) Walk(f WalkFn) { f(e) }
 
+func (e *DropLabelsExpr) Accept(v RootVisitor) { v.VisitDropLabels(e) }
+
 type KeepLabelsExpr struct {
 	keepLabels []log.KeepLabel
 	implicit
@@ -663,6 +700,8 @@ func newKeepLabelsExpr(keepLabels []log.KeepLabel) *KeepLabelsExpr {
 	return &KeepLabelsExpr{keepLabels: keepLabels}
 }
 
+func (*KeepLabelsExpr) isStageExpr() {}
+
 func (e *KeepLabelsExpr) Shardable() bool { return true }
 
 func (e *KeepLabelsExpr) Stage() (log.Stage, error) {
@@ -694,10 +733,16 @@ func (e *KeepLabelsExpr) String() string {
 
 func (e *KeepLabelsExpr) Walk(f WalkFn) { f(e) }
 
+func (e *KeepLabelsExpr) Accept(v RootVisitor) { v.VisitKeepLabel(e) }
+
+func (*LineFmtExpr) isStageExpr() {}
+
 func (e *LineFmtExpr) Shardable() bool { return true }
 
 func (e *LineFmtExpr) Walk(f WalkFn) { f(e) }
 
+func (e *LineFmtExpr) Accept(v RootVisitor) { v.VisitLineFmt(e) }
+
 func (e *LineFmtExpr) Stage() (log.Stage, error) {
 	return log.NewFormatter(e.Value)
 }
@@ -717,6 +762,8 @@ func newLabelFmtExpr(fmts []log.LabelFmt) *LabelFmtExpr {
 	}
 }
 
+func (*LabelFmtExpr) isStageExpr() {}
+
 func (e *LabelFmtExpr) Shardable() bool {
 	// While LabelFmt is shardable in certain cases, it is not always,
 	// but this is left to the shardmapper to determine
@@ -725,6 +772,8 @@ func (e *LabelFmtExpr) Shardable() bool {
 
 func (e *LabelFmtExpr) Walk(f WalkFn) { f(e) }
 
+func (e *LabelFmtExpr) Accept(v RootVisitor) { v.VisitLabelFmt(e) }
+
 func (e *LabelFmtExpr) Stage() (log.Stage, error) {
 	return log.NewLabelsFormatter(e.Formats)
 }
@@ -761,10 +810,14 @@ func newJSONExpressionParser(expressions []log.LabelExtractionExpr) *JSONExpress
 	}
 }
 
+func (*JSONExpressionParser) isStageExpr() {}
+
 func (j *JSONExpressionParser) Shardable() bool { return true }
 
 func (j *JSONExpressionParser) Walk(f WalkFn) { f(j) }
 
+func (j *JSONExpressionParser) Accept(v RootVisitor) { v.VisitJSONExpressionParser(j) }
+
 func (j *JSONExpressionParser) Stage() (log.Stage, error) {
 	return log.NewJSONExpressionParser(j.Expressions)
 }
@@ -813,10 +866,14 @@ func newLogfmtExpressionParser(expressions []log.LabelExtractionExpr, flags []st
 	return &e
 }
 
+func (*LogfmtExpressionParser) isStageExpr() {}
+
 func (l *LogfmtExpressionParser) Shardable() bool { return true }
 
 func (l *LogfmtExpressionParser) Walk(f WalkFn) { f(l) }
 
+func (l *LogfmtExpressionParser) Accept(v RootVisitor) { v.VisitLogfmtExpressionParser(l) }
+
 func (l *LogfmtExpressionParser) Stage() (log.Stage, error) {
 	return log.NewLogfmtExpressionParser(l.Expressions, l.Strict)
 }
@@ -942,6 +999,10 @@ func (r *LogRange) Walk(f WalkFn) {
 	r.Left.Walk(f)
 }
 
+func (r *LogRange) Accept(v RootVisitor) {
+	v.VisitLogRange(r)
+}
+
 // WithoutUnwrap returns a copy of the log range without the unwrap statement.
 func (r *LogRange) WithoutUnwrap() (*LogRange, error) {
 	left, err := Clone(r.Left)
@@ -1101,12 +1162,15 @@ func IsLogicalBinOp(op string) bool {
 }
 
 // SampleExpr is a LogQL expression filtering logs and returning metric samples.
+//
+//sumtype:decl
 type SampleExpr interface {
 	// Selector is the LogQL selector to apply when retrieving logs.
 	Selector() (LogSelectorExpr, error)
 	Extractor() (SampleExtractor, error)
 	MatcherGroups() ([]MatcherRange, error)
 	Expr
+	isSampleExpr()
 }
 
 // RangeAggregationExpr not all range vector aggregation expressions support grouping by/without label(s),
@@ -1150,6 +1214,7 @@ func newRangeAggregationExpr(left *LogRange, operation string, gr *Grouping, str
 	}
 	return e
 }
+func (e *RangeAggregationExpr) isSampleExpr() {}
 
 func (e *RangeAggregationExpr) Selector() (LogSelectorExpr, error) {
 	if e.err != nil {
@@ -1235,6 +1300,8 @@ func (e *RangeAggregationExpr) Walk(f WalkFn) {
 	e.Left.Walk(f)
 }
 
+func (e *RangeAggregationExpr) Accept(v RootVisitor) { v.VisitRangeAggregation(e) }
+
 // Grouping struct represents the grouping by/without label(s) for vector aggregators and range vector aggregators.
 // The representation is as follows:
 //   - No Grouping (labels dismissed):  () => Grouping{Without: false, Groups: nil}
@@ -1278,11 +1345,11 @@ func (g Grouping) Singleton() bool {
 // VectorAggregationExpr all vector aggregation expressions support grouping by/without label(s),
 // therefore the Grouping struct can never be nil.
 type VectorAggregationExpr struct {
-	Left SampleExpr
+	Left SampleExpr `json:"sample_expr"`
 
-	Grouping  *Grouping
-	Params    int
-	Operation string
+	Grouping  *Grouping `json:"grouping,omitempty"`
+	Params    int       `json:"params"`
+	Operation string    `json:"operation"`
 	err       error
 	implicit
 }
@@ -1319,6 +1386,8 @@ func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *Groupin
 	}
 }
 
+func (e *VectorAggregationExpr) isSampleExpr() {}
+
 func (e *VectorAggregationExpr) MatcherGroups() ([]MatcherRange, error) {
 	if e.err != nil {
 		return nil, e.err
@@ -1438,6 +1507,8 @@ func (e *VectorAggregationExpr) Walk(f WalkFn) {
 	e.Left.Walk(f)
 }
 
+func (e *VectorAggregationExpr) Accept(v RootVisitor) { v.VisitVectorAggregation(e) }
+
 // VectorMatchCardinality describes the cardinality relationship
 // of two Vectors in a binary operation.
 type VectorMatchCardinality int
@@ -1553,6 +1624,8 @@ func (e *BinOpExpr) Walk(f WalkFn) {
 	walkAll(f, e.SampleExpr, e.RHS)
 }
 
+func (e *BinOpExpr) Accept(v RootVisitor) { v.VisitBinOp(e) }
+
 func mustNewBinOpExpr(op string, opts *BinOpOptions, lhs, rhs Expr) SampleExpr {
 	left, ok := lhs.(SampleExpr)
 	if !ok {
@@ -1852,7 +1925,7 @@ func MergeBinOp(op string, left, right *promql.Sample, swap, filter, isVectorCom
 }
 
 type LiteralExpr struct {
-	Val float64
+	Val float64 `json:"val"`
 	err error
 	implicit
 }
@@ -1880,10 +1953,13 @@ func (e *LiteralExpr) String() string {
 // literlExpr impls SampleExpr & LogSelectorExpr mainly to reduce the need for more complicated typings
 // to facilitate sum types. We'll be type switching when evaluating them anyways
 // and they will only be present in binary operation legs.
+func (e *LiteralExpr) isSampleExpr()                           {}
+func (e *LiteralExpr) isLogSelectorExpr()                      {}
 func (e *LiteralExpr) Selector() (LogSelectorExpr, error)      { return e, e.err }
 func (e *LiteralExpr) HasFilter() bool                         { return false }
 func (e *LiteralExpr) Shardable() bool                         { return true }
 func (e *LiteralExpr) Walk(f WalkFn)                           { f(e) }
+func (e *LiteralExpr) Accept(v RootVisitor)                    { v.VisitLiteral(e) }
 func (e *LiteralExpr) Pipeline() (log.Pipeline, error)         { return log.NewNoopPipeline(), nil }
 func (e *LiteralExpr) Matchers() []*labels.Matcher             { return nil }
 func (e *LiteralExpr) MatcherGroups() ([]MatcherRange, error)  { return nil, e.err }
@@ -1945,6 +2021,8 @@ func mustNewLabelReplaceExpr(left SampleExpr, dst, replacement, src, regex strin
 	}
 }
 
+func (e *LabelReplaceExpr) isSampleExpr() {}
+
 func (e *LabelReplaceExpr) Selector() (LogSelectorExpr, error) {
 	if e.err != nil {
 		return nil, e.err
@@ -1978,6 +2056,8 @@ func (e *LabelReplaceExpr) Walk(f WalkFn) {
 	e.Left.Walk(f)
 }
 
+func (e *LabelReplaceExpr) Accept(v RootVisitor) { v.VisitLabelReplace(e) }
+
 func (e *LabelReplaceExpr) String() string {
 	var sb strings.Builder
 	sb.WriteString(OpLabelReplace)
@@ -2078,6 +2158,9 @@ func NewVectorExpr(scalar string) *VectorExpr {
 	}
 }
 
+func (e *VectorExpr) isSampleExpr()      {}
+func (e *VectorExpr) isLogSelectorExpr() {}
+
 func (e *VectorExpr) Err() error {
 	return e.err
 }
@@ -2102,6 +2185,7 @@ func (e *VectorExpr) Selector() (LogSelectorExpr, error)      { return e, e.err
 func (e *VectorExpr) HasFilter() bool                         { return false }
 func (e *VectorExpr) Shardable() bool                         { return false }
 func (e *VectorExpr) Walk(f WalkFn)                           { f(e) }
+func (e *VectorExpr) Accept(v RootVisitor)                    { v.VisitVector(e) }
 func (e *VectorExpr) Pipeline() (log.Pipeline, error)         { return log.NewNoopPipeline(), nil }
 func (e *VectorExpr) Matchers() []*labels.Matcher             { return nil }
 func (e *VectorExpr) MatcherGroups() ([]MatcherRange, error)  { return nil, e.err }
diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go
index c0c39a7d8f137..cd45b6ec74c1e 100644
--- a/pkg/logql/syntax/parser_test.go
+++ b/pkg/logql/syntax/parser_test.go
@@ -17,241 +17,240 @@ func NewStringLabelFilter(s string) *string {
 	return &s
 }
 
-func TestParse(t *testing.T) {
-	for _, tc := range []struct {
-		in  string
-		exp Expr
-		err error
-	}{
-		{
-			// raw string
-			in: "count_over_time({foo=~`bar\\w+`}[12h] |~ `error\\`)",
-			exp: &RangeAggregationExpr{
-				Operation: "count_over_time",
-				Left: &LogRange{
-					Left: &PipelineExpr{
-						MultiStages: MultiStageExpr{
-							newLineFilterExpr(labels.MatchRegexp, "", "error\\"),
-						},
-						Left: &MatchersExpr{
-							Mts: []*labels.Matcher{
-								mustNewMatcher(labels.MatchRegexp, "foo", "bar\\w+"),
-							},
+var ParseTestCases = []struct {
+	in  string
+	exp Expr
+	err error
+}{
+	{
+		// raw string
+		in: "count_over_time({foo=~`bar\\w+`}[12h] |~ `error\\`)",
+		exp: &RangeAggregationExpr{
+			Operation: "count_over_time",
+			Left: &LogRange{
+				Left: &PipelineExpr{
+					MultiStages: MultiStageExpr{
+						newLineFilterExpr(labels.MatchRegexp, "", "error\\"),
+					},
+					Left: &MatchersExpr{
+						Mts: []*labels.Matcher{
+							mustNewMatcher(labels.MatchRegexp, "foo", "bar\\w+"),
 						},
 					},
-					Interval: 12 * time.Hour,
 				},
+				Interval: 12 * time.Hour,
 			},
 		},
-		{
-			in: `{ foo = "bar" } | decolorize`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newDecolorizeExpr(),
-				},
-			),
-		},
-		{
-			// test [12h] before filter expr
-			in: `count_over_time({foo="bar"}[12h] |= "error")`,
-			exp: &RangeAggregationExpr{
-				Operation: "count_over_time",
-				Left: &LogRange{
-					Left: newPipelineExpr(
-						newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "foo", Value: "bar"}}),
-						MultiStageExpr{
-							newLineFilterExpr(labels.MatchEqual, "", "error"),
-						},
-					),
-					Interval: 12 * time.Hour,
-				},
+	},
+	{
+		in: `{ foo = "bar" } | decolorize`,
+		exp: newPipelineExpr(
+			newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+			MultiStageExpr{
+				newDecolorizeExpr(),
 			},
-		},
-		{
-			// test [12h] after filter expr
-			in: `count_over_time({foo="bar"} |= "error" [12h])`,
-			exp: &RangeAggregationExpr{
-				Operation: "count_over_time",
-				Left: &LogRange{
-					Left: newPipelineExpr(
-						newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "foo", Value: "bar"}}),
-						MultiStageExpr{newLineFilterExpr(labels.MatchEqual, "", "error")},
-					),
-					Interval: 12 * time.Hour,
-				},
+		),
+	},
+	{
+		// test [12h] before filter expr
+		in: `count_over_time({foo="bar"}[12h] |= "error")`,
+		exp: &RangeAggregationExpr{
+			Operation: "count_over_time",
+			Left: &LogRange{
+				Left: newPipelineExpr(
+					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "foo", Value: "bar"}}),
+					MultiStageExpr{
+						newLineFilterExpr(labels.MatchEqual, "", "error"),
+					},
+				),
+				Interval: 12 * time.Hour,
 			},
 		},
-		{
-			in:  `{foo="bar"}`,
-			exp: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-		},
-		{
-			in:  `{ foo = "bar" }`,
-			exp: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-		},
-		{
-			in: `{ namespace="buzz", foo != "bar" }`,
-			exp: &MatchersExpr{Mts: []*labels.Matcher{
-				mustNewMatcher(labels.MatchEqual, "namespace", "buzz"),
-				mustNewMatcher(labels.MatchNotEqual, "foo", "bar"),
-			}},
-		},
-		{
-			in:  `{ foo =~ "bar" }`,
-			exp: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchRegexp, "foo", "bar")}},
-		},
-		{
-			in: `{ namespace="buzz", foo !~ "bar" }`,
-			exp: &MatchersExpr{Mts: []*labels.Matcher{
-				mustNewMatcher(labels.MatchEqual, "namespace", "buzz"),
-				mustNewMatcher(labels.MatchNotRegexp, "foo", "bar"),
-			}},
-		},
-		{
-			in: `count_over_time({ foo = "bar" }[12m])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 12 * time.Minute,
-				},
-				Operation: "count_over_time",
+	},
+	{
+		// test [12h] after filter expr
+		in: `count_over_time({foo="bar"} |= "error" [12h])`,
+		exp: &RangeAggregationExpr{
+			Operation: "count_over_time",
+			Left: &LogRange{
+				Left: newPipelineExpr(
+					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "foo", Value: "bar"}}),
+					MultiStageExpr{newLineFilterExpr(labels.MatchEqual, "", "error")},
+				),
+				Interval: 12 * time.Hour,
 			},
 		},
-		{
-			in: `bytes_over_time({ foo = "bar" }[12m])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 12 * time.Minute,
-				},
-				Operation: OpRangeTypeBytes,
+	},
+	{
+		in:  `{foo="bar"}`,
+		exp: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+	},
+	{
+		in:  `{ foo = "bar" }`,
+		exp: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+	},
+	{
+		in: `{ namespace="buzz", foo != "bar" }`,
+		exp: &MatchersExpr{Mts: []*labels.Matcher{
+			mustNewMatcher(labels.MatchEqual, "namespace", "buzz"),
+			mustNewMatcher(labels.MatchNotEqual, "foo", "bar"),
+		}},
+	},
+	{
+		in:  `{ foo =~ "bar" }`,
+		exp: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchRegexp, "foo", "bar")}},
+	},
+	{
+		in: `{ namespace="buzz", foo !~ "bar" }`,
+		exp: &MatchersExpr{Mts: []*labels.Matcher{
+			mustNewMatcher(labels.MatchEqual, "namespace", "buzz"),
+			mustNewMatcher(labels.MatchNotRegexp, "foo", "bar"),
+		}},
+	},
+	{
+		in: `count_over_time({ foo = "bar" }[12m])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 12 * time.Minute,
 			},
-		},
-		{
-			in: `bytes_rate({ foo = "bar" }[12m])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 12 * time.Minute,
-				},
-				Operation: OpRangeTypeBytesRate,
+			Operation: "count_over_time",
+		},
+	},
+	{
+		in: `bytes_over_time({ foo = "bar" }[12m])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 12 * time.Minute,
 			},
-		},
-		{
-			in: `rate({ foo = "bar" }[5h])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "rate",
+			Operation: OpRangeTypeBytes,
+		},
+	},
+	{
+		in: `bytes_rate({ foo = "bar" }[12m])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 12 * time.Minute,
 			},
-		},
-		{
-			in: `{ foo = "bar" }|logfmt --strict`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newLogfmtParserExpr([]string{OpStrict}),
-				},
-			),
-		},
-		{
-			in: `{ foo = "bar" }|logfmt|rate="a"`, // rate should also be able to use it as IDENTIFIER
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newLogfmtParserExpr(nil),
-					newLabelFilterExpr(log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "rate", "a"))),
-				},
-			),
-		},
-		{
-			in: `{ foo = "bar" }|logfmt|length>5d`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newLogfmtParserExpr(nil),
-					newLabelFilterExpr(log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "length", 5*24*time.Hour)),
-				},
-			),
-		},
-		{
-			in: `{ foo = "bar" }|logfmt --strict --keep-empty|length>5d`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newLogfmtParserExpr([]string{OpStrict, OpKeepEmpty}),
-					newLabelFilterExpr(log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "length", 5*24*time.Hour)),
-				},
-			),
-		},
-		{
-			in: `rate({ foo = "bar" }[5d])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * 24 * time.Hour,
-				},
-				Operation: "rate",
+			Operation: OpRangeTypeBytesRate,
+		},
+	},
+	{
+		in: `rate({ foo = "bar" }[5h])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
 			},
-		},
-		{
-			in: `count_over_time({ foo = "bar" }[1w])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 7 * 24 * time.Hour,
-				},
-				Operation: "count_over_time",
+			Operation: "rate",
+		},
+	},
+	{
+		in: `{ foo = "bar" }|logfmt --strict`,
+		exp: newPipelineExpr(
+			newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+			MultiStageExpr{
+				newLogfmtParserExpr([]string{OpStrict}),
 			},
-		},
-		{
-			in: `absent_over_time({ foo = "bar" }[1w])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 7 * 24 * time.Hour,
-				},
-				Operation: OpRangeTypeAbsent,
+		),
+	},
+	{
+		in: `{ foo = "bar" }|logfmt|rate="a"`, // rate should also be able to use it as IDENTIFIER
+		exp: newPipelineExpr(
+			newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+			MultiStageExpr{
+				newLogfmtParserExpr(nil),
+				newLabelFilterExpr(log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "rate", "a"))),
 			},
-		},
-		{
-			in: `sum(rate({ foo = "bar" }[5h]))`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "rate",
-			}, "sum", nil, nil),
-		},
-		{
-			in: `sum(rate({ foo ="bar" }[1y]))`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 365 * 24 * time.Hour,
-				},
-				Operation: "rate",
-			}, "sum", nil, nil),
-		},
-		{
-			in: `avg(count_over_time({ foo = "bar" }[5h])) by (bar,foo)`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "count_over_time",
-			}, "avg", &Grouping{
-				Without: false,
-				Groups:  []string{"bar", "foo"},
-			}, nil),
-		},
-		{
-			in: `avg(
+		),
+	},
+	{
+		in: `{ foo = "bar" }|logfmt|length>5d`,
+		exp: newPipelineExpr(
+			newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+			MultiStageExpr{
+				newLogfmtParserExpr(nil),
+				newLabelFilterExpr(log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "length", 5*24*time.Hour)),
+			},
+		),
+	},
+	{
+		in: `{ foo = "bar" }|logfmt --strict --keep-empty|length>5d`,
+		exp: newPipelineExpr(
+			newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+			MultiStageExpr{
+				newLogfmtParserExpr([]string{OpStrict, OpKeepEmpty}),
+				newLabelFilterExpr(log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "length", 5*24*time.Hour)),
+			},
+		),
+	},
+	{
+		in: `rate({ foo = "bar" }[5d])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * 24 * time.Hour,
+			},
+			Operation: "rate",
+		},
+	},
+	{
+		in: `count_over_time({ foo = "bar" }[1w])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 7 * 24 * time.Hour,
+			},
+			Operation: "count_over_time",
+		},
+	},
+	{
+		in: `absent_over_time({ foo = "bar" }[1w])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 7 * 24 * time.Hour,
+			},
+			Operation: OpRangeTypeAbsent,
+		},
+	},
+	{
+		in: `sum(rate({ foo = "bar" }[5h]))`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "rate",
+		}, "sum", nil, nil),
+	},
+	{
+		in: `sum(rate({ foo ="bar" }[1y]))`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 365 * 24 * time.Hour,
+			},
+			Operation: "rate",
+		}, "sum", nil, nil),
+	},
+	{
+		in: `avg(count_over_time({ foo = "bar" }[5h])) by (bar,foo)`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "count_over_time",
+		}, "avg", &Grouping{
+			Without: false,
+			Groups:  []string{"bar", "foo"},
+		}, nil),
+	},
+	{
+		in: `avg(
 					label_replace(
 						count_over_time({ foo = "bar" }[5h]),
 						"bar",
@@ -260,499 +259,499 @@ func TestParse(t *testing.T) {
 						"(.*).(.*)"
 					)
 				) by (bar,foo)`,
-			exp: mustNewVectorAggregationExpr(
-				mustNewLabelReplaceExpr(
-					&RangeAggregationExpr{
-						Left: &LogRange{
-							Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-							Interval: 5 * time.Hour,
-						},
-						Operation: "count_over_time",
+		exp: mustNewVectorAggregationExpr(
+			mustNewLabelReplaceExpr(
+				&RangeAggregationExpr{
+					Left: &LogRange{
+						Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+						Interval: 5 * time.Hour,
 					},
-					"bar", "$1$2", "foo", "(.*).(.*)",
-				),
-				"avg", &Grouping{
-					Without: false,
-					Groups:  []string{"bar", "foo"},
-				}, nil),
-		},
-		{
-			in: `avg(count_over_time({ foo = "bar" }[5h])) by ()`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "count_over_time",
-			}, "avg", &Grouping{
-				Without: false,
-				Groups:  nil,
-			}, nil),
-		},
-		{
-			in: `max without (bar) (count_over_time({ foo = "bar" }[5h]))`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "count_over_time",
-			}, "max", &Grouping{
-				Without: true,
-				Groups:  []string{"bar"},
-			}, nil),
-		},
-		{
-			in: `max without () (count_over_time({ foo = "bar" }[5h]))`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "count_over_time",
-			}, "max", &Grouping{
-				Without: true,
-				Groups:  nil,
-			}, nil),
-		},
-		{
-			in: `topk(10,count_over_time({ foo = "bar" }[5h])) without (bar)`,
-			exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "count_over_time",
-			}, "topk", &Grouping{
-				Without: true,
-				Groups:  []string{"bar"},
-			}, NewStringLabelFilter("10")),
-		},
-		{
-			in: `bottomk(30 ,sum(rate({ foo = "bar" }[5h])) by (foo))`,
-			exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
-				},
-				Operation: "rate",
-			}, "sum", &Grouping{
-				Groups:  []string{"foo"},
-				Without: false,
-			}, nil), "bottomk", nil,
-				NewStringLabelFilter("30")),
-		},
-		{
-			in: `max( sum(count_over_time({ foo = "bar" }[5h])) without (foo,bar) ) by (foo)`,
-			exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&RangeAggregationExpr{
-				Left: &LogRange{
-					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-					Interval: 5 * time.Hour,
+					Operation: "count_over_time",
 				},
-				Operation: "count_over_time",
-			}, "sum", &Grouping{
-				Groups:  []string{"foo", "bar"},
-				Without: true,
-			}, nil), "max", &Grouping{
-				Groups:  []string{"foo"},
+				"bar", "$1$2", "foo", "(.*).(.*)",
+			),
+			"avg", &Grouping{
 				Without: false,
+				Groups:  []string{"bar", "foo"},
 			}, nil),
-		},
-		{
-			in:  `unk({ foo = "bar" }[5m])`,
-			err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER", 1, 1),
-		},
-		{
-			in:  `absent_over_time({ foo = "bar" }[5h]) by (foo)`,
-			err: logqlmodel.NewParseError("grouping not allowed for absent_over_time aggregation", 0, 0),
-		},
-		{
-			in:  `rate({ foo = "bar" }[5minutes])`,
-			err: logqlmodel.NewParseError(`unknown unit "minutes" in duration "5minutes"`, 0, 21),
-		},
-		{
-			in:  `label_replace(rate({ foo = "bar" }[5m]),"")`,
-			err: logqlmodel.NewParseError(`syntax error: unexpected ), expecting ,`, 1, 43),
-		},
-		{
-			in:  `label_replace(rate({ foo = "bar" }[5m]),"foo","$1","bar","^^^^x43\\q")`,
-			err: logqlmodel.NewParseError("invalid regex in label_replace: error parsing regexp: invalid escape sequence: `\\q`", 0, 0),
-		},
-		{
-			in:  `rate({ foo = "bar" }[5)`,
-			err: logqlmodel.NewParseError("missing closing ']' in duration", 0, 21),
-		},
-		{
-			in:  `min({ foo = "bar" }[5m])`,
-			err: logqlmodel.NewParseError("syntax error: unexpected RANGE", 0, 20),
-		},
-		// line filter for ip-matcher
-		{
-			in: `{foo="bar"} |= "baz" |= ip("123.123.123.123")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newNestedLineFilterExpr(
-						newLineFilterExpr(labels.MatchEqual, "", "baz"),
-						newLineFilterExpr(labels.MatchEqual, OpFilterIP, "123.123.123.123"),
-					),
-				},
-			),
-		},
-		{
-			in: `{ foo = "bar" , ip="foo"}|logfmt|= ip("127.0.0.1")|ip="2.3.4.5"|ip="abc"|ipaddr=ip("4.5.6.7")|ip=ip("6.7.8.9")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar"), mustNewMatcher(labels.MatchEqual, "ip", "foo")}),
-				MultiStageExpr{
-					newLogfmtParserExpr(nil),
-					newLineFilterExpr(labels.MatchEqual, OpFilterIP, "127.0.0.1"),
-					newLabelFilterExpr(log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "ip", "2.3.4.5"))),
-					newLabelFilterExpr(log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "ip", "abc"))),
-					newLabelFilterExpr(log.NewIPLabelFilter("4.5.6.7", "ipaddr", log.LabelFilterEqual)),
-					newLabelFilterExpr(log.NewIPLabelFilter("6.7.8.9", "ip", log.LabelFilterEqual)),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} |= ip("123.123.123.123")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
+	},
+	{
+		in: `avg(count_over_time({ foo = "bar" }[5h])) by ()`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "count_over_time",
+		}, "avg", &Grouping{
+			Without: false,
+			Groups:  nil,
+		}, nil),
+	},
+	{
+		in: `max without (bar) (count_over_time({ foo = "bar" }[5h]))`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "count_over_time",
+		}, "max", &Grouping{
+			Without: true,
+			Groups:  []string{"bar"},
+		}, nil),
+	},
+	{
+		in: `max without () (count_over_time({ foo = "bar" }[5h]))`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "count_over_time",
+		}, "max", &Grouping{
+			Without: true,
+			Groups:  nil,
+		}, nil),
+	},
+	{
+		in: `topk(10,count_over_time({ foo = "bar" }[5h])) without (bar)`,
+		exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "count_over_time",
+		}, "topk", &Grouping{
+			Without: true,
+			Groups:  []string{"bar"},
+		}, NewStringLabelFilter("10")),
+	},
+	{
+		in: `bottomk(30 ,sum(rate({ foo = "bar" }[5h])) by (foo))`,
+		exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "rate",
+		}, "sum", &Grouping{
+			Groups:  []string{"foo"},
+			Without: false,
+		}, nil), "bottomk", nil,
+			NewStringLabelFilter("30")),
+	},
+	{
+		in: `max( sum(count_over_time({ foo = "bar" }[5h])) without (foo,bar) ) by (foo)`,
+		exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&RangeAggregationExpr{
+			Left: &LogRange{
+				Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+				Interval: 5 * time.Hour,
+			},
+			Operation: "count_over_time",
+		}, "sum", &Grouping{
+			Groups:  []string{"foo", "bar"},
+			Without: true,
+		}, nil), "max", &Grouping{
+			Groups:  []string{"foo"},
+			Without: false,
+		}, nil),
+	},
+	{
+		in:  `unk({ foo = "bar" }[5m])`,
+		err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER", 1, 1),
+	},
+	{
+		in:  `absent_over_time({ foo = "bar" }[5h]) by (foo)`,
+		err: logqlmodel.NewParseError("grouping not allowed for absent_over_time aggregation", 0, 0),
+	},
+	{
+		in:  `rate({ foo = "bar" }[5minutes])`,
+		err: logqlmodel.NewParseError(`unknown unit "minutes" in duration "5minutes"`, 0, 21),
+	},
+	{
+		in:  `label_replace(rate({ foo = "bar" }[5m]),"")`,
+		err: logqlmodel.NewParseError(`syntax error: unexpected ), expecting ,`, 1, 43),
+	},
+	{
+		in:  `label_replace(rate({ foo = "bar" }[5m]),"foo","$1","bar","^^^^x43\\q")`,
+		err: logqlmodel.NewParseError("invalid regex in label_replace: error parsing regexp: invalid escape sequence: `\\q`", 0, 0),
+	},
+	{
+		in:  `rate({ foo = "bar" }[5)`,
+		err: logqlmodel.NewParseError("missing closing ']' in duration", 0, 21),
+	},
+	{
+		in:  `min({ foo = "bar" }[5m])`,
+		err: logqlmodel.NewParseError("syntax error: unexpected RANGE", 0, 20),
+	},
+	// line filter for ip-matcher
+	{
+		in: `{foo="bar"} |= "baz" |= ip("123.123.123.123")`,
+		exp: newPipelineExpr(
+			newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
+			MultiStageExpr{
+				newNestedLineFilterExpr(
+					newLineFilterExpr(labels.MatchEqual, "", "baz"),
 					newLineFilterExpr(labels.MatchEqual, OpFilterIP, "123.123.123.123"),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} |= ip("123.123.123.123")|= "baz"`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newNestedLineFilterExpr(
-						newLineFilterExpr(labels.MatchEqual, OpFilterIP, "123.123.123.123"),
-						newLineFilterExpr(labels.MatchEqual, "", "baz"),
-					),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} |= ip("123.123.123.123")|= "baz" |=ip("123.123.123.123")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newNestedLineFilterExpr(
-						newNestedLineFilterExpr(
-							newLineFilterExpr(labels.MatchEqual, OpFilterIP, "123.123.123.123"),
-							newLineFilterExpr(labels.MatchEqual, "", "baz"),
-						),
-						newLineFilterExpr(labels.MatchEqual, OpFilterIP, "123.123.123.123"),
-					),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} |= "baz" |= ip("123.123.123.123")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newNestedLineFilterExpr(
-						newLineFilterExpr(labels.MatchEqual, "", "baz"),
-						newLineFilterExpr(labels.MatchEqual, OpFilterIP, "123.123.123.123"),
-					),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} != ip("123.123.123.123")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, "123.123.123.123"),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} != ip("123.123.123.123")|= "baz"`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newNestedLineFilterExpr(
-						newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, "123.123.123.123"),
-						newLineFilterExpr(labels.MatchEqual, "", "baz"),
-					),
-				},
-			),
-		},
-		{
-			in: `{foo="bar"} != ip("123.123.123.123")|= "baz" !=ip("123.123.123.123")`,
-			exp: newPipelineExpr(
-				newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}),
-				MultiStageExpr{
-					newNestedLineFilterExpr(
-						newNestedLineFilterExpr(
-							newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, "123.123.123.123"),
-							newLineFilterExpr(labels.MatchEqual, "", "baz"),
-						),
-						newLineFilterExpr(labels.MatchNotEqual, OpFilterIP, "123.123.123.123"),
-					),
-				},
-			),
-		},
-		// label filter for ip-matcher
-		{
-			in:  `{ foo = "bar" }|logfmt|addr>=ip("1.2.3.4")`,
-			err: logqlmodel.NewParseError("syntax error: unexpected ip, expecting BYTES or NUMBER or DURATION", 1, 30),
-		},
-		{
-			in:  `{ foo = "bar" }|logfmt|addr>ip("1.2.3.4")`,
-			err: logqlmodel.NewParseError("syntax error: unexpected ip, expecting BYTES or NUMBER or DURATION", 1, 29),
-		},
-		{
-			in:  `{ foo = "bar" }|logfmt|addr<=ip("1.2.3.4")`,
-			err: logqlmodel.NewParseError("syntax error: unexpected ip, expecting BYTES or NUMBER or DURATION", 1, 30),
-		},
-		{
-			in:  `{ foo = "bar" }|logfmt|addr=ip("1.2.3.4")`,
+		err: logqlmodel.NewParseError("syntax error: unexpected ip, expecting BYTES or NUMBER or DURATION", 1, 30),
+	},
+	{
+		in:  `{ foo = "bar" }|logfmt|addr>ip("1.2.3.4")`,
+		err: logqlmodel.NewParseError("syntax error: unexpected ip, expecting BYTES or NUMBER or DURATION", 1, 29),
+	},
+	{
+		in:  `{ foo = "bar" }|logfmt|addr<=ip("1.2.3.4")`,
+		err: logqlmodel.NewParseError("syntax error: unexpected ip, expecting BYTES or NUMBER or DURATION", 1, 30),
+	},
+	{
+		in:  `{ foo = "bar" }|logfmt|addr= 250ms or ( status_code < 500 and status_code > 200)`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
 					),
-					mustNewVectorAggregationExpr(newRangeAggregationExpr(
-						&LogRange{
-							Left: &MatchersExpr{
-								Mts: []*labels.Matcher{
-									mustNewMatcher(labels.MatchEqual, "foo", "bar"),
-								},
-							},
-							Interval: 5 * time.Minute,
-						}, OpRangeTypeCount, nil, nil),
-						"sum",
-						&Grouping{
-							Without: false,
-							Groups:  []string{"foo"},
-						},
-						nil,
+				},
+			},
+		},
+	},
+	{
+		in: `{app="foo"} |= "bar" | unpack | json | latency >= 250ms or ( status_code < 500 and status_code > 200)`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeUnpack, ""),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
 					),
-				),
-			),
+				},
+			},
 		},
-		{
-			in: `sum by (job) (
-							count_over_time({namespace="tns"} |= "level=error"[5m])
-						/
-							count_over_time({namespace="tns"}[5m])
-						)`,
-			exp: mustNewVectorAggregationExpr(
-				mustNewBinOpExpr(OpTypeDiv,
-					&BinOpOptions{
-						VectorMatching: &VectorMatching{Card: CardOneToOne},
-					},
-					newRangeAggregationExpr(
-						&LogRange{
-							Left: newPipelineExpr(
-								newMatcherExpr([]*labels.Matcher{
-									mustNewMatcher(labels.MatchEqual, "namespace", "tns"),
-								}),
-								MultiStageExpr{
-									newLineFilterExpr(labels.MatchEqual, "", "level=error"),
-								}),
-							Interval: 5 * time.Minute,
-						}, OpRangeTypeCount, nil, nil),
-					newRangeAggregationExpr(
-						&LogRange{
-							Left: &MatchersExpr{
-								Mts: []*labels.Matcher{
-									mustNewMatcher(labels.MatchEqual, "namespace", "tns"),
-								},
-							},
-							Interval: 5 * time.Minute,
-						}, OpRangeTypeCount, nil, nil)), OpTypeSum, &Grouping{Groups: []string{"job"}}, nil),
+	},
+	{
+		in: `{app="foo"} |= "bar" | json | (duration > 1s or status!= 200) and method!="POST"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewAndLabelFilter(
+						log.NewOrLabelFilter(
+							log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "duration", 1*time.Second),
+							log.NewNumericLabelFilter(log.LabelFilterNotEqual, "status", 200.0),
+						),
+						log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotEqual, "method", "POST")),
+					),
+				},
+			},
 		},
-		{
-			in: `sum by (job) (
-							count_over_time({namespace="tns"} |= "level=error"[5m])
-						/
-							count_over_time({namespace="tns"}[5m])
-						) * 100`,
-			exp: mustNewBinOpExpr(OpTypeMul, &BinOpOptions{
-				VectorMatching: &VectorMatching{Card: CardOneToOne},
-			}, mustNewVectorAggregationExpr(
-				mustNewBinOpExpr(OpTypeDiv,
-					&BinOpOptions{
-						VectorMatching: &VectorMatching{Card: CardOneToOne},
-					},
-					newRangeAggregationExpr(
-						&LogRange{
-							Left: newPipelineExpr(
-								newMatcherExpr([]*labels.Matcher{
-									mustNewMatcher(labels.MatchEqual, "namespace", "tns"),
-								}),
-								MultiStageExpr{
-									newLineFilterExpr(labels.MatchEqual, "", "level=error"),
-								}),
-							Interval: 5 * time.Minute,
-						}, OpRangeTypeCount, nil, nil),
-					newRangeAggregationExpr(
-						&LogRange{
-							Left: &MatchersExpr{
-								Mts: []*labels.Matcher{
-									mustNewMatcher(labels.MatchEqual, "namespace", "tns"),
-								},
-							},
-							Interval: 5 * time.Minute,
-						}, OpRangeTypeCount, nil, nil)), OpTypeSum, &Grouping{Groups: []string{"job"}}, nil),
-				mustNewLiteralExpr("100", false),
-			),
+	},
+	{
+		in: `{app="foo"} |= "bar" | pattern " bar " | (duration > 1s or status!= 200) and method!="POST"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypePattern, " bar "),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewAndLabelFilter(
+						log.NewOrLabelFilter(
+							log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "duration", 1*time.Second),
+							log.NewNumericLabelFilter(log.LabelFilterNotEqual, "status", 200.0),
+						),
+						log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotEqual, "method", "POST")),
+					),
+				},
+			},
 		},
-		{
-			// reduces binop with two literalExprs
-			in: `sum(count_over_time({foo="bar"}[5m])) by (foo) + 1 / 2`,
-			exp: mustNewBinOpExpr(
-				OpTypeAdd,
-				&BinOpOptions{
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
+	},
+	{
+		in: `{app="foo"} |= "bar" | json | ( status_code < 500 and status_code > 200) or latency >= 250ms `,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+					),
 				},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						&LogRange{
-							Left: &MatchersExpr{
-								Mts: []*labels.Matcher{
-									mustNewMatcher(labels.MatchEqual, "foo", "bar"),
-								},
-							},
-							Interval: 5 * time.Minute,
-						}, OpRangeTypeCount, nil, nil),
-					"sum",
-					&Grouping{
-						Without: false,
-						Groups:  []string{"foo"},
-					},
-					nil,
-				),
-				&LiteralExpr{Val: 0.5},
-			),
+			},
 		},
-		{
-			// test signs
-			in: `1 + -2 / 1`,
-			exp: mustNewBinOpExpr(
-				OpTypeAdd,
-				&BinOpOptions{
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
+	},
+	{
+		in: `{app="foo"} |= "bar" | json | ( status_code < 500 or status_code > 200) and latency >= 250ms `,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewAndLabelFilter(
+						log.NewOrLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+					),
 				},
-				&LiteralExpr{Val: 1},
-				mustNewBinOpExpr(OpTypeDiv, &BinOpOptions{
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
-				}, &LiteralExpr{Val: -2}, &LiteralExpr{Val: 1}),
-			),
+			},
 		},
-		{
-			// test signs/ops with equal associativity
-			in: `1 + 1 - -1`,
-			exp: mustNewBinOpExpr(
-				OpTypeSub,
-				&BinOpOptions{
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
+	},
+	{
+		in: `{app="foo"} |= "bar" | json |  status_code < 500 or status_code > 200 and latency >= 250ms `,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+							log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						),
+					),
 				},
-				mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
-				}, &LiteralExpr{Val: 1}, &LiteralExpr{Val: 1}),
-				&LiteralExpr{Val: -1},
-			),
+			},
 		},
-		{
-			in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)`,
-			exp: &PipelineExpr{
+	},
+	{
+		in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+				| foo="bar" buzz!="blip", blop=~"boop" or fuzz==5`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
+					),
+				},
+				&LabelFilterExpr{
+					LabelFilterer: log.NewAndLabelFilter(
+						log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "foo", "bar")),
+						log.NewAndLabelFilter(
+							log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotEqual, "buzz", "blip")),
+							log.NewOrLabelFilter(
+								log.NewStringLabelFilter(mustNewMatcher(labels.MatchRegexp, "blop", "boop")),
+								log.NewNumericLabelFilter(log.LabelFilterEqual, "fuzz", 5),
+							),
+						),
+					),
+				},
+			},
+		},
+	},
+	{
+		in: `{app="foo"} |= "bar" | line_format "blip{{ .foo }}blop"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLineFmtExpr("blip{{ .foo }}blop"),
+			},
+		},
+	},
+	{
+		in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
+					),
+				},
+				newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+			},
+		},
+	},
+	{
+		in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+						),
+					),
+				},
+				newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+				newLabelFmtExpr([]log.LabelFmt{
+					log.NewRenameLabelFmt("foo", "bar"),
+					log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+				}),
+			},
+		},
+	},
+	{
+		in: `count_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}"[5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
@@ -1380,16 +1603,63 @@ func TestParse(t *testing.T) {
 							),
 						),
 					},
+					newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+					newLabelFmtExpr([]log.LabelFmt{
+						log.NewRenameLabelFmt("foo", "bar"),
+						log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+					}),
+				},
+			},
+				5*time.Minute,
+				nil, nil),
+			OpRangeTypeCount,
+			nil,
+			nil,
+		),
+	},
+	{
+		in:  "{app=~\"\xa0\xa1\"}",
+		exp: nil,
+		err: logqlmodel.NewParseError("invalid UTF-8 encoding", 1, 7),
+	},
+	{
+		in: `sum_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}"[5m])`,
+		exp: nil,
+		err: logqlmodel.NewParseError("invalid aggregation sum_over_time without unwrap", 0, 0),
+	},
+	{
+		in:  `count_over_time({app="foo"} |= "foo" | json | unwrap foo [5m])`,
+		exp: nil,
+		err: logqlmodel.NewParseError("invalid aggregation count_over_time with unwrap", 0, 0),
+	},
+	{
+		in: `{app="foo"} |= "bar" | json |  status_code < 500 or status_code > 200 and size >= 2.5KiB `,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
+				&LabelFilterExpr{
+					LabelFilterer: log.NewOrLabelFilter(
+						log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+						log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
+							log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, "size", 2560),
+						),
+					),
 				},
 			},
 		},
-		{
-			in: `{app="foo"} |= "bar" | unpack | json | latency >= 250ms or ( status_code < 500 and status_code > 200)`,
-			exp: &PipelineExpr{
+	},
+	{
+		in: `stdvar_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
-					newLabelParserExpr(OpParserTypeUnpack, ""),
 					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
 						LabelFilterer: log.NewOrLabelFilter(
@@ -1400,108 +1670,224 @@ func TestParse(t *testing.T) {
 							),
 						),
 					},
+					newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+					newLabelFmtExpr([]log.LabelFmt{
+						log.NewRenameLabelFmt("foo", "bar"),
+						log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+					}),
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json | (duration > 1s or status!= 200) and method!="POST"`,
-			exp: &PipelineExpr{
+				5*time.Minute,
+				newUnwrapExpr("foo", ""),
+				nil),
+			OpRangeTypeStdvar, nil, nil,
+		),
+	},
+	{
+		in: `stdvar_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration(foo) [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
 					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
-						LabelFilterer: log.NewAndLabelFilter(
-							log.NewOrLabelFilter(
-								log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "duration", 1*time.Second),
-								log.NewNumericLabelFilter(log.LabelFilterNotEqual, "status", 200.0),
+						LabelFilterer: log.NewOrLabelFilter(
+							log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+							log.NewAndLabelFilter(
+								log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+								log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
 							),
-							log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotEqual, "method", "POST")),
 						),
 					},
+					newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+					newLabelFmtExpr([]log.LabelFmt{
+						log.NewRenameLabelFmt("foo", "bar"),
+						log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+					}),
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | pattern " bar " | (duration > 1s or status!= 200) and method!="POST"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("foo", OpConvDuration),
+				nil),
+			OpRangeTypeStdvar, nil, nil,
+		),
+	},
+	{
+		in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap bytes(foo) [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
+				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
 				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "bar"),
-					newLabelParserExpr(OpParserTypePattern, " bar "),
+					newLineFilterExpr(labels.MatchEqual, "", "level=error"),
+					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
 						LabelFilterer: log.NewAndLabelFilter(
-							log.NewOrLabelFilter(
-								log.NewDurationLabelFilter(log.LabelFilterGreaterThan, "duration", 1*time.Second),
-								log.NewNumericLabelFilter(log.LabelFilterNotEqual, "status", 200.0),
-							),
-							log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotEqual, "method", "POST")),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "foo", 5),
+							log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
 						),
 					},
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json | ( status_code < 500 and status_code > 200) or latency >= 250ms `,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("foo", OpConvBytes),
+				nil),
+			OpRangeTypeSum, nil, nil,
+		),
+	},
+	{
+		in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap bytes(foo) [5m] offset 5m)`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
+				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
 				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "bar"),
+					newLineFilterExpr(labels.MatchEqual, "", "level=error"),
 					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
-						LabelFilterer: log.NewOrLabelFilter(
-							log.NewAndLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-								log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-							),
-							log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+						LabelFilterer: log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "foo", 5),
+							log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
 						),
 					},
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json | ( status_code < 500 or status_code > 200) and latency >= 250ms `,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("foo", OpConvBytes),
+				newOffsetExpr(5*time.Minute)),
+			OpRangeTypeSum, nil, nil,
+		),
+	},
+	{
+		in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
+				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
 				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "bar"),
+					newLineFilterExpr(labels.MatchEqual, "", "level=error"),
 					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
 						LabelFilterer: log.NewAndLabelFilter(
-							log.NewOrLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-								log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-							),
-							log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+							log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "foo", 5),
+							log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
 						),
 					},
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json |  status_code < 500 or status_code > 200 and latency >= 250ms `,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("latency", ""),
+				nil),
+			OpRangeTypeSum, nil, nil,
+		),
+	},
+	{
+		in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo==5,bar<25ms| unwrap latency [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
+				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
 				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "bar"),
+					newLineFilterExpr(labels.MatchEqual, "", "level=error"),
 					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
-						LabelFilterer: log.NewOrLabelFilter(
-							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-							log.NewAndLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-								log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-							),
+						LabelFilterer: log.NewAndLabelFilter(
+							log.NewNumericLabelFilter(log.LabelFilterEqual, "foo", 5),
+							log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
 						),
 					},
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-				| foo="bar" buzz!="blip", blop=~"boop" or fuzz==5`,
-			exp: &PipelineExpr{
+				5*time.Minute,
+				newUnwrapExpr("latency", ""),
+				nil),
+			OpRangeTypeSum, nil, nil,
+		),
+	},
+	{
+		in: `stddev_over_time({app="foo"} |= "bar" | unwrap bar [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
+				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				MultiStages: MultiStageExpr{
+					newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				},
+			},
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				nil),
+			OpRangeTypeStddev, nil, nil,
+		),
+	},
+	{
+		in: `min_over_time({app="foo"} | unwrap bar [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(
+				newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				nil),
+			OpRangeTypeMin, nil, nil,
+		),
+	},
+	{
+		in: `min_over_time({app="foo"} | unwrap bar [5m]) by ()`,
+		exp: newRangeAggregationExpr(
+			newLogRange(
+				newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				nil),
+			OpRangeTypeMin, &Grouping{}, nil,
+		),
+	},
+	{
+		in: `max_over_time({app="foo"} | unwrap bar [5m]) without ()`,
+		exp: newRangeAggregationExpr(
+			newLogRange(
+				newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				nil),
+			OpRangeTypeMax, &Grouping{Without: true}, nil,
+		),
+	},
+	{
+		in: `max_over_time({app="foo"} | unwrap bar [5m]) without (foo,bar)`,
+		exp: newRangeAggregationExpr(
+			newLogRange(
+				newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				nil),
+			OpRangeTypeMax, &Grouping{Without: true, Groups: []string{"foo", "bar"}}, nil,
+		),
+	},
+	{
+		in: `max_over_time({app="foo"} | unwrap bar [5m] offset 5m) without (foo,bar)`,
+		exp: newRangeAggregationExpr(
+			newLogRange(
+				newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				newOffsetExpr(5*time.Minute)),
+			OpRangeTypeMax, &Grouping{Without: true, Groups: []string{"foo", "bar"}}, nil,
+		),
+	},
+	{
+		in: `max_over_time({app="foo"} | unwrap bar [5m] offset -5m) without (foo,bar)`,
+		exp: newRangeAggregationExpr(
+			newLogRange(
+				newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+				5*time.Minute,
+				newUnwrapExpr("bar", ""),
+				newOffsetExpr(-5*time.Minute)),
+			OpRangeTypeMax, &Grouping{Without: true, Groups: []string{"foo", "bar"}}, nil,
+		),
+	},
+	{
+		in: `max_over_time(({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo )[5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
@@ -1515,35 +1901,24 @@ func TestParse(t *testing.T) {
 							),
 						),
 					},
-					&LabelFilterExpr{
-						LabelFilterer: log.NewAndLabelFilter(
-							log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "foo", "bar")),
-							log.NewAndLabelFilter(
-								log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotEqual, "buzz", "blip")),
-								log.NewOrLabelFilter(
-									log.NewStringLabelFilter(mustNewMatcher(labels.MatchRegexp, "blop", "boop")),
-									log.NewNumericLabelFilter(log.LabelFilterEqual, "fuzz", 5),
-								),
-							),
-						),
-					},
-				},
-			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | line_format "blip{{ .foo }}blop"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "bar"),
-					newLineFmtExpr("blip{{ .foo }}blop"),
+					newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+					newLabelFmtExpr([]log.LabelFmt{
+						log.NewRenameLabelFmt("foo", "bar"),
+						log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+					}),
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}"`,
-			exp: &PipelineExpr{
+				5*time.Minute,
+				newUnwrapExpr("foo", ""),
+				nil),
+			OpRangeTypeMax, nil, nil,
+		),
+	},
+	{
+		in: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m])`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
@@ -1558,13 +1933,23 @@ func TestParse(t *testing.T) {
 						),
 					},
 					newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+					newLabelFmtExpr([]log.LabelFmt{
+						log.NewRenameLabelFmt("foo", "bar"),
+						log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+					}),
 				},
 			},
-		},
-		{
-			in: `{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}"`,
-			exp: &PipelineExpr{
+				5*time.Minute,
+				newUnwrapExpr("foo", ""),
+				nil),
+			OpRangeTypeQuantile, nil, NewStringLabelFilter("0.99998"),
+		),
+	},
+	{
+		in: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]) by (namespace,instance)`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
@@ -1585,78 +1970,51 @@ func TestParse(t *testing.T) {
 					}),
 				},
 			},
-		},
-		{
-			in: `count_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}"[5m])`,
-			exp: newRangeAggregationExpr(
-				newLogRange(&PipelineExpr{
-					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					MultiStages: MultiStageExpr{
-						newLineFilterExpr(labels.MatchEqual, "", "bar"),
-						newLabelParserExpr(OpParserTypeJSON, ""),
-						&LabelFilterExpr{
-							LabelFilterer: log.NewOrLabelFilter(
-								log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-								log.NewAndLabelFilter(
-									log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-									log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-								),
-							),
-						},
-						newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-						newLabelFmtExpr([]log.LabelFmt{
-							log.NewRenameLabelFmt("foo", "bar"),
-							log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-						}),
-					},
-				},
-					5*time.Minute,
-					nil, nil),
-				OpRangeTypeCount,
-				nil,
-				nil,
-			),
-		},
-		{
-			in:  "{app=~\"\xa0\xa1\"}",
-			exp: nil,
-			err: logqlmodel.NewParseError("invalid UTF-8 encoding", 1, 7),
-		},
-		{
-			in: `sum_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}"[5m])`,
-			exp: nil,
-			err: logqlmodel.NewParseError("invalid aggregation sum_over_time without unwrap", 0, 0),
-		},
-		{
-			in:  `count_over_time({app="foo"} |= "foo" | json | unwrap foo [5m])`,
-			exp: nil,
-			err: logqlmodel.NewParseError("invalid aggregation count_over_time with unwrap", 0, 0),
-		},
-		{
-			in: `{app="foo"} |= "bar" | json |  status_code < 500 or status_code > 200 and size >= 2.5KiB `,
-			exp: &PipelineExpr{
+				5*time.Minute,
+				newUnwrapExpr("foo", ""),
+				nil),
+			OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
+		),
+	},
+	{
+		in: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo | __error__ !~".+"[5m]) by (namespace,instance)`,
+		exp: newRangeAggregationExpr(
+			newLogRange(&PipelineExpr{
 				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 				MultiStages: MultiStageExpr{
 					newLineFilterExpr(labels.MatchEqual, "", "bar"),
 					newLabelParserExpr(OpParserTypeJSON, ""),
 					&LabelFilterExpr{
 						LabelFilterer: log.NewOrLabelFilter(
-							log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+							log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
 							log.NewAndLabelFilter(
+								log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
 								log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-								log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, "size", 2560),
 							),
 						),
 					},
+					newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+					newLabelFmtExpr([]log.LabelFmt{
+						log.NewRenameLabelFmt("foo", "bar"),
+						log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+					}),
 				},
 			},
-		},
-		{
-			in: `stdvar_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m])`,
-			exp: newRangeAggregationExpr(
+				5*time.Minute,
+				newUnwrapExpr("foo", "").addPostFilter(log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotRegexp, logqlmodel.ErrorLabel, ".+"))),
+				nil),
+			OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
+		),
+	},
+	{
+		in: `sum without (foo) (
+				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
+								) by (namespace,instance)
+					)`,
+		exp: mustNewVectorAggregationExpr(
+			newRangeAggregationExpr(
 				newLogRange(&PipelineExpr{
 					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 					MultiStages: MultiStageExpr{
@@ -1681,13 +2039,21 @@ func TestParse(t *testing.T) {
 					5*time.Minute,
 					newUnwrapExpr("foo", ""),
 					nil),
-				OpRangeTypeStdvar, nil, nil,
+				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 			),
-		},
-		{
-			in: `stdvar_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration(foo) [5m])`,
-			exp: newRangeAggregationExpr(
+			OpTypeSum,
+			&Grouping{Without: true, Groups: []string{"foo"}},
+			nil,
+		),
+	},
+	{
+		in: `sum without (foo) (
+				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m] offset 5m
+								) by (namespace,instance)
+					)`,
+		exp: mustNewVectorAggregationExpr(
+			newRangeAggregationExpr(
 				newLogRange(&PipelineExpr{
 					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 					MultiStages: MultiStageExpr{
@@ -1710,184 +2076,23 @@ func TestParse(t *testing.T) {
 					},
 				},
 					5*time.Minute,
-					newUnwrapExpr("foo", OpConvDuration),
-					nil),
-				OpRangeTypeStdvar, nil, nil,
-			),
-		},
-		{
-			in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap bytes(foo) [5m])`,
-			exp: newRangeAggregationExpr(
-				newLogRange(&PipelineExpr{
-					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
-					MultiStages: MultiStageExpr{
-						newLineFilterExpr(labels.MatchEqual, "", "level=error"),
-						newLabelParserExpr(OpParserTypeJSON, ""),
-						&LabelFilterExpr{
-							LabelFilterer: log.NewAndLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "foo", 5),
-								log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
-							),
-						},
-					},
-				},
-					5*time.Minute,
-					newUnwrapExpr("foo", OpConvBytes),
-					nil),
-				OpRangeTypeSum, nil, nil,
-			),
-		},
-		{
-			in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap bytes(foo) [5m] offset 5m)`,
-			exp: newRangeAggregationExpr(
-				newLogRange(&PipelineExpr{
-					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
-					MultiStages: MultiStageExpr{
-						newLineFilterExpr(labels.MatchEqual, "", "level=error"),
-						newLabelParserExpr(OpParserTypeJSON, ""),
-						&LabelFilterExpr{
-							LabelFilterer: log.NewAndLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "foo", 5),
-								log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
-							),
-						},
-					},
-				},
-					5*time.Minute,
-					newUnwrapExpr("foo", OpConvBytes),
-					newOffsetExpr(5*time.Minute)),
-				OpRangeTypeSum, nil, nil,
-			),
-		},
-		{
-			in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
-			exp: newRangeAggregationExpr(
-				newLogRange(&PipelineExpr{
-					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
-					MultiStages: MultiStageExpr{
-						newLineFilterExpr(labels.MatchEqual, "", "level=error"),
-						newLabelParserExpr(OpParserTypeJSON, ""),
-						&LabelFilterExpr{
-							LabelFilterer: log.NewAndLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, "foo", 5),
-								log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
-							),
-						},
-					},
-				},
-					5*time.Minute,
-					newUnwrapExpr("latency", ""),
-					nil),
-				OpRangeTypeSum, nil, nil,
-			),
-		},
-		{
-			in: `sum_over_time({namespace="tns"} |= "level=error" | json |foo==5,bar<25ms| unwrap latency [5m])`,
-			exp: newRangeAggregationExpr(
-				newLogRange(&PipelineExpr{
-					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "namespace", Value: "tns"}}),
-					MultiStages: MultiStageExpr{
-						newLineFilterExpr(labels.MatchEqual, "", "level=error"),
-						newLabelParserExpr(OpParserTypeJSON, ""),
-						&LabelFilterExpr{
-							LabelFilterer: log.NewAndLabelFilter(
-								log.NewNumericLabelFilter(log.LabelFilterEqual, "foo", 5),
-								log.NewDurationLabelFilter(log.LabelFilterLesserThan, "bar", 25*time.Millisecond),
-							),
-						},
-					},
-				},
-					5*time.Minute,
-					newUnwrapExpr("latency", ""),
-					nil),
-				OpRangeTypeSum, nil, nil,
-			),
-		},
-		{
-			in: `stddev_over_time({app="foo"} |= "bar" | unwrap bar [5m])`,
-			exp: newRangeAggregationExpr(
-				newLogRange(&PipelineExpr{
-					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					MultiStages: MultiStageExpr{
-						newLineFilterExpr(labels.MatchEqual, "", "bar"),
-					},
-				},
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
-					nil),
-				OpRangeTypeStddev, nil, nil,
-			),
-		},
-		{
-			in: `min_over_time({app="foo"} | unwrap bar [5m])`,
-			exp: newRangeAggregationExpr(
-				newLogRange(
-					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
-					nil),
-				OpRangeTypeMin, nil, nil,
-			),
-		},
-		{
-			in: `min_over_time({app="foo"} | unwrap bar [5m]) by ()`,
-			exp: newRangeAggregationExpr(
-				newLogRange(
-					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
-					nil),
-				OpRangeTypeMin, &Grouping{}, nil,
-			),
-		},
-		{
-			in: `max_over_time({app="foo"} | unwrap bar [5m]) without ()`,
-			exp: newRangeAggregationExpr(
-				newLogRange(
-					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
-					nil),
-				OpRangeTypeMax, &Grouping{Without: true}, nil,
-			),
-		},
-		{
-			in: `max_over_time({app="foo"} | unwrap bar [5m]) without (foo,bar)`,
-			exp: newRangeAggregationExpr(
-				newLogRange(
-					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
-					nil),
-				OpRangeTypeMax, &Grouping{Without: true, Groups: []string{"foo", "bar"}}, nil,
-			),
-		},
-		{
-			in: `max_over_time({app="foo"} | unwrap bar [5m] offset 5m) without (foo,bar)`,
-			exp: newRangeAggregationExpr(
-				newLogRange(
-					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
+					newUnwrapExpr("foo", ""),
 					newOffsetExpr(5*time.Minute)),
-				OpRangeTypeMax, &Grouping{Without: true, Groups: []string{"foo", "bar"}}, nil,
-			),
-		},
-		{
-			in: `max_over_time({app="foo"} | unwrap bar [5m] offset -5m) without (foo,bar)`,
-			exp: newRangeAggregationExpr(
-				newLogRange(
-					newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-					5*time.Minute,
-					newUnwrapExpr("bar", ""),
-					newOffsetExpr(-5*time.Minute)),
-				OpRangeTypeMax, &Grouping{Without: true, Groups: []string{"foo", "bar"}}, nil,
+				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 			),
-		},
-		{
-			in: `max_over_time(({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo )[5m])`,
-			exp: newRangeAggregationExpr(
+			OpTypeSum,
+			&Grouping{Without: true, Groups: []string{"foo"}},
+			nil,
+		),
+	},
+	{
+		in: `sum without (foo) (
+			quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration(foo) [5m]
+							) by (namespace,instance)
+				)`,
+		exp: mustNewVectorAggregationExpr(
+			newRangeAggregationExpr(
 				newLogRange(&PipelineExpr{
 					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 					MultiStages: MultiStageExpr{
@@ -1910,15 +2115,23 @@ func TestParse(t *testing.T) {
 					},
 				},
 					5*time.Minute,
-					newUnwrapExpr("foo", ""),
+					newUnwrapExpr("foo", OpConvDuration),
 					nil),
-				OpRangeTypeMax, nil, nil,
+				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 			),
-		},
-		{
-			in: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m])`,
-			exp: newRangeAggregationExpr(
+			OpTypeSum,
+			&Grouping{Without: true, Groups: []string{"foo"}},
+			nil,
+		),
+	},
+	{
+		in: `sum without (foo) (
+			quantile_over_time(.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration(foo) [5m]
+							) by (namespace,instance)
+				)`,
+		exp: mustNewVectorAggregationExpr(
+			newRangeAggregationExpr(
 				newLogRange(&PipelineExpr{
 					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 					MultiStages: MultiStageExpr{
@@ -1941,15 +2154,23 @@ func TestParse(t *testing.T) {
 					},
 				},
 					5*time.Minute,
-					newUnwrapExpr("foo", ""),
+					newUnwrapExpr("foo", OpConvDuration),
 					nil),
-				OpRangeTypeQuantile, nil, NewStringLabelFilter("0.99998"),
+				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter(".99998"),
 			),
-		},
-		{
-			in: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]) by (namespace,instance)`,
-			exp: newRangeAggregationExpr(
+			OpTypeSum,
+			&Grouping{Without: true, Groups: []string{"foo"}},
+			nil,
+		),
+	},
+	{
+		in: `sum without (foo) (
+			quantile_over_time(.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration_seconds(foo) [5m]
+							) by (namespace,instance)
+				)`,
+		exp: mustNewVectorAggregationExpr(
+			newRangeAggregationExpr(
 				newLogRange(&PipelineExpr{
 					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 					MultiStages: MultiStageExpr{
@@ -1972,15 +2193,23 @@ func TestParse(t *testing.T) {
 					},
 				},
 					5*time.Minute,
-					newUnwrapExpr("foo", ""),
+					newUnwrapExpr("foo", OpConvDurationSeconds),
 					nil),
-				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
+				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter(".99998"),
 			),
-		},
-		{
-			in: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-			| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo | __error__ !~".+"[5m]) by (namespace,instance)`,
-			exp: newRangeAggregationExpr(
+			OpTypeSum,
+			&Grouping{Without: true, Groups: []string{"foo"}},
+			nil,
+		),
+	},
+	{
+		in: `topk(10,
+				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
+								) by (namespace,instance)
+					)`,
+		exp: mustNewVectorAggregationExpr(
+			newRangeAggregationExpr(
 				newLogRange(&PipelineExpr{
 					Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
 					MultiStages: MultiStageExpr{
@@ -2003,57 +2232,33 @@ func TestParse(t *testing.T) {
 					},
 				},
 					5*time.Minute,
-					newUnwrapExpr("foo", "").addPostFilter(log.NewStringLabelFilter(mustNewMatcher(labels.MatchNotRegexp, logqlmodel.ErrorLabel, ".+"))),
+					newUnwrapExpr("foo", ""),
 					nil),
 				OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 			),
-		},
-		{
-			in: `sum without (foo) (
+			OpTypeTopK,
+			nil,
+			NewStringLabelFilter("10"),
+		),
+	},
+	{
+		in: `
+			sum by (foo,bar) (
 				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
 					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
 								) by (namespace,instance)
-					)`,
-			exp: mustNewVectorAggregationExpr(
-				newRangeAggregationExpr(
-					newLogRange(&PipelineExpr{
-						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-						MultiStages: MultiStageExpr{
-							newLineFilterExpr(labels.MatchEqual, "", "bar"),
-							newLabelParserExpr(OpParserTypeJSON, ""),
-							&LabelFilterExpr{
-								LabelFilterer: log.NewOrLabelFilter(
-									log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-									log.NewAndLabelFilter(
-										log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-										log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-									),
-								),
-							},
-							newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-							newLabelFmtExpr([]log.LabelFmt{
-								log.NewRenameLabelFmt("foo", "bar"),
-								log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-							}),
-						},
-					},
-						5*time.Minute,
-						newUnwrapExpr("foo", ""),
-						nil),
-					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-				),
-				OpTypeSum,
-				&Grouping{Without: true, Groups: []string{"foo"}},
-				nil,
-			),
-		},
-		{
-			in: `sum without (foo) (
-				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m] offset 5m
-								) by (namespace,instance)
-					)`,
-			exp: mustNewVectorAggregationExpr(
+					)
+					+
+					avg(
+						avg_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+							| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
+										) by (namespace,instance)
+							) by (foo,bar)
+					`,
+		exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{
+			VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: false,
+		},
+			mustNewVectorAggregationExpr(
 				newRangeAggregationExpr(
 					newLogRange(&PipelineExpr{
 						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
@@ -2078,21 +2283,14 @@ func TestParse(t *testing.T) {
 					},
 						5*time.Minute,
 						newUnwrapExpr("foo", ""),
-						newOffsetExpr(5*time.Minute)),
+						nil),
 					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 				),
 				OpTypeSum,
-				&Grouping{Without: true, Groups: []string{"foo"}},
+				&Grouping{Groups: []string{"foo", "bar"}},
 				nil,
 			),
-		},
-		{
-			in: `sum without (foo) (
-			quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration(foo) [5m]
-							) by (namespace,instance)
-				)`,
-			exp: mustNewVectorAggregationExpr(
+			mustNewVectorAggregationExpr(
 				newRangeAggregationExpr(
 					newLogRange(&PipelineExpr{
 						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
@@ -2116,22 +2314,32 @@ func TestParse(t *testing.T) {
 						},
 					},
 						5*time.Minute,
-						newUnwrapExpr("foo", OpConvDuration),
+						newUnwrapExpr("foo", ""),
 						nil),
-					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
+					OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
 				),
-				OpTypeSum,
-				&Grouping{Without: true, Groups: []string{"foo"}},
+				OpTypeAvg,
+				&Grouping{Groups: []string{"foo", "bar"}},
 				nil,
 			),
-		},
-		{
-			in: `sum without (foo) (
-			quantile_over_time(.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration(foo) [5m]
-							) by (namespace,instance)
-				)`,
-			exp: mustNewVectorAggregationExpr(
+		),
+	},
+	{
+		in: `
+			sum by (foo,bar) (
+				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
+								) by (namespace,instance)
+					)
+					+ ignoring (bar)
+					avg(
+						avg_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+							| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
+										) by (namespace,instance)
+							) by (foo)
+					`,
+		exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardOneToOne, On: false, MatchingLabels: []string{"bar"}}},
+			mustNewVectorAggregationExpr(
 				newRangeAggregationExpr(
 					newLogRange(&PipelineExpr{
 						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
@@ -2155,22 +2363,15 @@ func TestParse(t *testing.T) {
 						},
 					},
 						5*time.Minute,
-						newUnwrapExpr("foo", OpConvDuration),
+						newUnwrapExpr("foo", ""),
 						nil),
-					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter(".99998"),
+					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 				),
 				OpTypeSum,
-				&Grouping{Without: true, Groups: []string{"foo"}},
+				&Grouping{Groups: []string{"foo", "bar"}},
 				nil,
 			),
-		},
-		{
-			in: `sum without (foo) (
-			quantile_over_time(.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap duration_seconds(foo) [5m]
-							) by (namespace,instance)
-				)`,
-			exp: mustNewVectorAggregationExpr(
+			mustNewVectorAggregationExpr(
 				newRangeAggregationExpr(
 					newLogRange(&PipelineExpr{
 						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
@@ -2194,22 +2395,32 @@ func TestParse(t *testing.T) {
 						},
 					},
 						5*time.Minute,
-						newUnwrapExpr("foo", OpConvDurationSeconds),
+						newUnwrapExpr("foo", ""),
 						nil),
-					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter(".99998"),
+					OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
 				),
-				OpTypeSum,
-				&Grouping{Without: true, Groups: []string{"foo"}},
+				OpTypeAvg,
+				&Grouping{Groups: []string{"foo"}},
 				nil,
 			),
-		},
-		{
-			in: `topk(10,
+		),
+	},
+	{
+		in: `
+			sum by (foo,bar) (
 				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
 					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
 								) by (namespace,instance)
-					)`,
-			exp: mustNewVectorAggregationExpr(
+					)
+					+ on (foo)
+					avg(
+						avg_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+							| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
+										) by (namespace,instance)
+							) by (foo)
+					`,
+		exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardOneToOne, On: true, MatchingLabels: []string{"foo"}}},
+			mustNewVectorAggregationExpr(
 				newRangeAggregationExpr(
 					newLogRange(&PipelineExpr{
 						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
@@ -2229,266 +2440,54 @@ func TestParse(t *testing.T) {
 							newLabelFmtExpr([]log.LabelFmt{
 								log.NewRenameLabelFmt("foo", "bar"),
 								log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-							}),
-						},
-					},
-						5*time.Minute,
-						newUnwrapExpr("foo", ""),
-						nil),
-					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-				),
-				OpTypeTopK,
-				nil,
-				NewStringLabelFilter("10"),
-			),
-		},
-		{
-			in: `
-			sum by (foo,bar) (
-				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
-								) by (namespace,instance)
-					)
-					+
-					avg(
-						avg_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-							| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
-										) by (namespace,instance)
-							) by (foo,bar)
-					`,
-			exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{
-				VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: false,
-			},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
-									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
-							},
-						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"foo", "bar"}},
-					nil,
-				),
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
-									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
-							},
-						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
-					),
-					OpTypeAvg,
-					&Grouping{Groups: []string{"foo", "bar"}},
-					nil,
-				),
-			),
-		},
-		{
-			in: `
-			sum by (foo,bar) (
-				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
-								) by (namespace,instance)
-					)
-					+ ignoring (bar)
-					avg(
-						avg_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-							| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
-										) by (namespace,instance)
-							) by (foo)
-					`,
-			exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardOneToOne, On: false, MatchingLabels: []string{"bar"}}},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
-									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
-							},
-						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"foo", "bar"}},
-					nil,
-				),
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
-									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
-							},
-						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
-					),
-					OpTypeAvg,
-					&Grouping{Groups: []string{"foo"}},
-					nil,
-				),
-			),
-		},
-		{
-			in: `
-			sum by (foo,bar) (
-				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
-								) by (namespace,instance)
-					)
-					+ on (foo)
-					avg(
-						avg_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-							| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
-										) by (namespace,instance)
-							) by (foo)
-					`,
-			exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardOneToOne, On: true, MatchingLabels: []string{"foo"}}},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
-									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
-							},
+							}),
 						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"foo", "bar"}},
-					nil,
+					},
+						5*time.Minute,
+						newUnwrapExpr("foo", ""),
+						nil),
+					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 				),
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
+				OpTypeSum,
+				&Grouping{Groups: []string{"foo", "bar"}},
+				nil,
+			),
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					newLogRange(&PipelineExpr{
+						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						MultiStages: MultiStageExpr{
+							newLineFilterExpr(labels.MatchEqual, "", "bar"),
+							newLabelParserExpr(OpParserTypeJSON, ""),
+							&LabelFilterExpr{
+								LabelFilterer: log.NewOrLabelFilter(
+									log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+									log.NewAndLabelFilter(
+										log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+										log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
 									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
+								),
 							},
+							newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+							newLabelFmtExpr([]log.LabelFmt{
+								log.NewRenameLabelFmt("foo", "bar"),
+								log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+							}),
 						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
-					),
-					OpTypeAvg,
-					&Grouping{Groups: []string{"foo"}},
-					nil,
+					},
+						5*time.Minute,
+						newUnwrapExpr("foo", ""),
+						nil),
+					OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
 				),
+				OpTypeAvg,
+				&Grouping{Groups: []string{"foo"}},
+				nil,
 			),
-		},
-		{
-			in: `
+		),
+	},
+	{
+		in: `
 			sum by (foo,bar) (
 				quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
 					| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m]
@@ -2501,137 +2500,137 @@ func TestParse(t *testing.T) {
 										) by (namespace,instance)
 							) by (foo)
 					`,
-			exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardManyToOne, Include: []string{"foo"}, On: false, MatchingLabels: []string{"bar"}}},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
+		exp: mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{ReturnBool: false, VectorMatching: &VectorMatching{Card: CardManyToOne, Include: []string{"foo"}, On: false, MatchingLabels: []string{"bar"}}},
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					newLogRange(&PipelineExpr{
+						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						MultiStages: MultiStageExpr{
+							newLineFilterExpr(labels.MatchEqual, "", "bar"),
+							newLabelParserExpr(OpParserTypeJSON, ""),
+							&LabelFilterExpr{
+								LabelFilterer: log.NewOrLabelFilter(
+									log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+									log.NewAndLabelFilter(
+										log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+										log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
 									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
+								),
 							},
+							newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+							newLabelFmtExpr([]log.LabelFmt{
+								log.NewRenameLabelFmt("foo", "bar"),
+								log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+							}),
 						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"foo", "bar"}},
-					nil,
+					},
+						5*time.Minute,
+						newUnwrapExpr("foo", ""),
+						nil),
+					OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 				),
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						newLogRange(&PipelineExpr{
-							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							MultiStages: MultiStageExpr{
-								newLineFilterExpr(labels.MatchEqual, "", "bar"),
-								newLabelParserExpr(OpParserTypeJSON, ""),
-								&LabelFilterExpr{
-									LabelFilterer: log.NewOrLabelFilter(
-										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-										log.NewAndLabelFilter(
-											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-										),
+				OpTypeSum,
+				&Grouping{Groups: []string{"foo", "bar"}},
+				nil,
+			),
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					newLogRange(&PipelineExpr{
+						Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						MultiStages: MultiStageExpr{
+							newLineFilterExpr(labels.MatchEqual, "", "bar"),
+							newLabelParserExpr(OpParserTypeJSON, ""),
+							&LabelFilterExpr{
+								LabelFilterer: log.NewOrLabelFilter(
+									log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+									log.NewAndLabelFilter(
+										log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+										log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
 									),
-								},
-								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-								newLabelFmtExpr([]log.LabelFmt{
-									log.NewRenameLabelFmt("foo", "bar"),
-									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-								}),
+								),
 							},
+							newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+							newLabelFmtExpr([]log.LabelFmt{
+								log.NewRenameLabelFmt("foo", "bar"),
+								log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+							}),
 						},
-							5*time.Minute,
-							newUnwrapExpr("foo", ""),
-							nil),
-						OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
-					),
-					OpTypeAvg,
-					&Grouping{Groups: []string{"foo"}},
-					nil,
+					},
+						5*time.Minute,
+						newUnwrapExpr("foo", ""),
+						nil),
+					OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
 				),
+				OpTypeAvg,
+				&Grouping{Groups: []string{"foo"}},
+				nil,
 			),
-		},
-		{
-			in: `
+		),
+	},
+	{
+		in: `
 			sum by (app,machine) (count_over_time({app="foo"}[1m])) > bool on () group_right (app) sum by (app) (count_over_time({app="foo"}[1m]))
 					`,
-			exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: []string{"app"}, On: true, MatchingLabels: nil}},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						&LogRange{
-							Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							Interval: 1 * time.Minute,
-						},
-						OpRangeTypeCount, nil, nil,
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"app", "machine"}},
-					nil,
+		exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: []string{"app"}, On: true, MatchingLabels: nil}},
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					&LogRange{
+						Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						Interval: 1 * time.Minute,
+					},
+					OpRangeTypeCount, nil, nil,
 				),
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						&LogRange{
-							Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							Interval: 1 * time.Minute,
-						},
-						OpRangeTypeCount, nil, nil,
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"app"}},
-					nil,
+				OpTypeSum,
+				&Grouping{Groups: []string{"app", "machine"}},
+				nil,
+			),
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					&LogRange{
+						Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						Interval: 1 * time.Minute,
+					},
+					OpRangeTypeCount, nil, nil,
 				),
+				OpTypeSum,
+				&Grouping{Groups: []string{"app"}},
+				nil,
 			),
-		},
-		{
-			in: `
+		),
+	},
+	{
+		in: `
 			sum by (app,machine) (count_over_time({app="foo"}[1m])) > bool on () group_right sum by (app) (count_over_time({app="foo"}[1m]))
 					`,
-			exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: nil, On: true, MatchingLabels: nil}},
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						&LogRange{
-							Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							Interval: 1 * time.Minute,
-						},
-						OpRangeTypeCount, nil, nil,
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"app", "machine"}},
-					nil,
+		exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: nil, On: true, MatchingLabels: nil}},
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					&LogRange{
+						Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						Interval: 1 * time.Minute,
+					},
+					OpRangeTypeCount, nil, nil,
 				),
-				mustNewVectorAggregationExpr(
-					newRangeAggregationExpr(
-						&LogRange{
-							Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-							Interval: 1 * time.Minute,
-						},
-						OpRangeTypeCount, nil, nil,
-					),
-					OpTypeSum,
-					&Grouping{Groups: []string{"app"}},
-					nil,
+				OpTypeSum,
+				&Grouping{Groups: []string{"app", "machine"}},
+				nil,
+			),
+			mustNewVectorAggregationExpr(
+				newRangeAggregationExpr(
+					&LogRange{
+						Left:     newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+						Interval: 1 * time.Minute,
+					},
+					OpRangeTypeCount, nil, nil,
 				),
+				OpTypeSum,
+				&Grouping{Groups: []string{"app"}},
+				nil,
 			),
-		},
-		{
-			in: `
+		),
+	},
+	{
+		in: `
 			label_replace(
 				sum by (foo,bar) (
 					quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
@@ -2649,194 +2648,207 @@ func TestParse(t *testing.T) {
 				"svc",
 				"(.*)"
 				)`,
-			exp: mustNewLabelReplaceExpr(
-				mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: false},
-					mustNewVectorAggregationExpr(
-						newRangeAggregationExpr(
-							newLogRange(&PipelineExpr{
-								Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-								MultiStages: MultiStageExpr{
-									newLineFilterExpr(labels.MatchEqual, "", "bar"),
-									newLabelParserExpr(OpParserTypeJSON, ""),
-									&LabelFilterExpr{
-										LabelFilterer: log.NewOrLabelFilter(
-											log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-											log.NewAndLabelFilter(
-												log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-												log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-											),
+		exp: mustNewLabelReplaceExpr(
+			mustNewBinOpExpr(OpTypeAdd, &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: false},
+				mustNewVectorAggregationExpr(
+					newRangeAggregationExpr(
+						newLogRange(&PipelineExpr{
+							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+							MultiStages: MultiStageExpr{
+								newLineFilterExpr(labels.MatchEqual, "", "bar"),
+								newLabelParserExpr(OpParserTypeJSON, ""),
+								&LabelFilterExpr{
+									LabelFilterer: log.NewOrLabelFilter(
+										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+										log.NewAndLabelFilter(
+											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
 										),
-									},
-									newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-									newLabelFmtExpr([]log.LabelFmt{
-										log.NewRenameLabelFmt("foo", "bar"),
-										log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-									}),
+									),
 								},
+								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+								newLabelFmtExpr([]log.LabelFmt{
+									log.NewRenameLabelFmt("foo", "bar"),
+									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+								}),
 							},
-								5*time.Minute,
-								newUnwrapExpr("foo", ""),
-								nil),
-							OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
-						),
-						OpTypeSum,
-						&Grouping{Groups: []string{"foo", "bar"}},
-						nil,
+						},
+							5*time.Minute,
+							newUnwrapExpr("foo", ""),
+							nil),
+						OpRangeTypeQuantile, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, NewStringLabelFilter("0.99998"),
 					),
-					mustNewVectorAggregationExpr(
-						newRangeAggregationExpr(
-							newLogRange(&PipelineExpr{
-								Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-								MultiStages: MultiStageExpr{
-									newLineFilterExpr(labels.MatchEqual, "", "bar"),
-									newLabelParserExpr(OpParserTypeJSON, ""),
-									&LabelFilterExpr{
-										LabelFilterer: log.NewOrLabelFilter(
-											log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
-											log.NewAndLabelFilter(
-												log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
-												log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
-											),
+					OpTypeSum,
+					&Grouping{Groups: []string{"foo", "bar"}},
+					nil,
+				),
+				mustNewVectorAggregationExpr(
+					newRangeAggregationExpr(
+						newLogRange(&PipelineExpr{
+							Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+							MultiStages: MultiStageExpr{
+								newLineFilterExpr(labels.MatchEqual, "", "bar"),
+								newLabelParserExpr(OpParserTypeJSON, ""),
+								&LabelFilterExpr{
+									LabelFilterer: log.NewOrLabelFilter(
+										log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, "latency", 250*time.Millisecond),
+										log.NewAndLabelFilter(
+											log.NewNumericLabelFilter(log.LabelFilterLesserThan, "status_code", 500.0),
+											log.NewNumericLabelFilter(log.LabelFilterGreaterThan, "status_code", 200.0),
 										),
-									},
-									newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
-									newLabelFmtExpr([]log.LabelFmt{
-										log.NewRenameLabelFmt("foo", "bar"),
-										log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
-									}),
+									),
 								},
+								newLineFmtExpr("blip{{ .foo }}blop {{.status_code}}"),
+								newLabelFmtExpr([]log.LabelFmt{
+									log.NewRenameLabelFmt("foo", "bar"),
+									log.NewTemplateLabelFmt("status_code", "buzz{{.bar}}"),
+								}),
 							},
-								5*time.Minute,
-								newUnwrapExpr("foo", ""),
-								nil),
-							OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
-						),
-						OpTypeAvg,
-						&Grouping{Groups: []string{"foo", "bar"}},
-						nil,
+						},
+							5*time.Minute,
+							newUnwrapExpr("foo", ""),
+							nil),
+						OpRangeTypeAvg, &Grouping{Without: false, Groups: []string{"namespace", "instance"}}, nil,
 					),
+					OpTypeAvg,
+					&Grouping{Groups: []string{"foo", "bar"}},
+					nil,
 				),
-				"foo", "$1", "svc", "(.*)",
 			),
-		},
-		{
-			// ensure binary ops with two literals are reduced recursively
-			in:  `1 + 1 + 1`,
-			exp: &LiteralExpr{Val: 3},
-		},
-		{
-			// ensure binary ops with two literals are reduced when comparisons are used
-			in:  `1 == 1`,
-			exp: &LiteralExpr{Val: 1},
-		},
-		{
-			// ensure binary ops with two literals are reduced when comparisons are used
-			in:  `1 != 1`,
-			exp: &LiteralExpr{Val: 0},
-		},
-		{
-			// ensure binary ops with two literals are reduced when comparisons are used
-			in:  `1 > 1`,
-			exp: &LiteralExpr{Val: 0},
-		},
-		{
-			// ensure binary ops with two literals are reduced when comparisons are used
-			in:  `1 >= 1`,
-			exp: &LiteralExpr{Val: 1},
-		},
-		{
-			// ensure binary ops with two literals are reduced when comparisons are used
-			in:  `1 < 1`,
-			exp: &LiteralExpr{Val: 0},
-		},
-		{
-			// ensure binary ops with two literals are reduced when comparisons are used
-			in:  `1 <= 1`,
-			exp: &LiteralExpr{Val: 1},
-		},
-		{
-			// ensure binary ops with two literals are reduced recursively when comparisons are used
-			in:  `1 >= 1 > 1`,
-			exp: &LiteralExpr{Val: 0},
-		},
-		{
-			in:  `{foo="bar"} + {foo="bar"}`,
-			err: logqlmodel.NewParseError(`unexpected type for left leg of binary operation (+): *syntax.MatchersExpr`, 0, 0),
-		},
-		{
-			in:  `sum(count_over_time({foo="bar"}[5m])) by (foo) - {foo="bar"}`,
-			err: logqlmodel.NewParseError(`unexpected type for right leg of binary operation (-): *syntax.MatchersExpr`, 0, 0),
-		},
-		{
-			in:  `{foo="bar"} / sum(count_over_time({foo="bar"}[5m])) by (foo)`,
-			err: logqlmodel.NewParseError(`unexpected type for left leg of binary operation (/): *syntax.MatchersExpr`, 0, 0),
-		},
-		{
-			in:  `sum(count_over_time({foo="bar"}[5m])) by (foo) or 1`,
-			err: logqlmodel.NewParseError(`unexpected literal for right leg of logical/set binary operation (or): 1.000000`, 0, 0),
-		},
-		{
-			in:  `1 unless sum(count_over_time({foo="bar"}[5m])) by (foo)`,
-			err: logqlmodel.NewParseError(`unexpected literal for left leg of logical/set binary operation (unless): 1.000000`, 0, 0),
-		},
-		{
-			in:  `sum(count_over_time({foo="bar"}[5m])) by (foo) + 1 or 1`,
-			err: logqlmodel.NewParseError(`unexpected literal for right leg of logical/set binary operation (or): 1.000000`, 0, 0),
-		},
-		{
-			in: `count_over_time({ foo ="bar" }[12m]) > count_over_time({ foo = "bar" }[12m])`,
-			exp: &BinOpExpr{
-				Op: OpTypeGT,
-				Opts: &BinOpOptions{
-					ReturnBool:     false,
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
-				},
-				SampleExpr: &RangeAggregationExpr{
-					Left: &LogRange{
-						Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-						Interval: 12 * time.Minute,
-					},
-					Operation: "count_over_time",
+			"foo", "$1", "svc", "(.*)",
+		),
+	},
+	{
+		// ensure binary ops with two literals are reduced recursively
+		in:  `1 + 1 + 1`,
+		exp: &LiteralExpr{Val: 3},
+	},
+	{
+		// ensure binary ops with two literals are reduced when comparisons are used
+		in:  `1 == 1`,
+		exp: &LiteralExpr{Val: 1},
+	},
+	{
+		// ensure binary ops with two literals are reduced when comparisons are used
+		in:  `1 != 1`,
+		exp: &LiteralExpr{Val: 0},
+	},
+	{
+		// ensure binary ops with two literals are reduced when comparisons are used
+		in:  `1 > 1`,
+		exp: &LiteralExpr{Val: 0},
+	},
+	{
+		// ensure binary ops with two literals are reduced when comparisons are used
+		in:  `1 >= 1`,
+		exp: &LiteralExpr{Val: 1},
+	},
+	{
+		// ensure binary ops with two literals are reduced when comparisons are used
+		in:  `1 < 1`,
+		exp: &LiteralExpr{Val: 0},
+	},
+	{
+		// ensure binary ops with two literals are reduced when comparisons are used
+		in:  `1 <= 1`,
+		exp: &LiteralExpr{Val: 1},
+	},
+	{
+		// ensure binary ops with two literals are reduced recursively when comparisons are used
+		in:  `1 >= 1 > 1`,
+		exp: &LiteralExpr{Val: 0},
+	},
+	{
+		in:  `{foo="bar"} + {foo="bar"}`,
+		err: logqlmodel.NewParseError(`unexpected type for left leg of binary operation (+): *syntax.MatchersExpr`, 0, 0),
+	},
+	{
+		in:  `sum(count_over_time({foo="bar"}[5m])) by (foo) - {foo="bar"}`,
+		err: logqlmodel.NewParseError(`unexpected type for right leg of binary operation (-): *syntax.MatchersExpr`, 0, 0),
+	},
+	{
+		in:  `{foo="bar"} / sum(count_over_time({foo="bar"}[5m])) by (foo)`,
+		err: logqlmodel.NewParseError(`unexpected type for left leg of binary operation (/): *syntax.MatchersExpr`, 0, 0),
+	},
+	{
+		in:  `sum(count_over_time({foo="bar"}[5m])) by (foo) or 1`,
+		err: logqlmodel.NewParseError(`unexpected literal for right leg of logical/set binary operation (or): 1.000000`, 0, 0),
+	},
+	{
+		in:  `1 unless sum(count_over_time({foo="bar"}[5m])) by (foo)`,
+		err: logqlmodel.NewParseError(`unexpected literal for left leg of logical/set binary operation (unless): 1.000000`, 0, 0),
+	},
+	{
+		in:  `sum(count_over_time({foo="bar"}[5m])) by (foo) + 1 or 1`,
+		err: logqlmodel.NewParseError(`unexpected literal for right leg of logical/set binary operation (or): 1.000000`, 0, 0),
+	},
+	{
+		in: `count_over_time({ foo ="bar" }[12m]) > count_over_time({ foo = "bar" }[12m])`,
+		exp: &BinOpExpr{
+			Op: OpTypeGT,
+			Opts: &BinOpOptions{
+				ReturnBool:     false,
+				VectorMatching: &VectorMatching{Card: CardOneToOne},
+			},
+			SampleExpr: &RangeAggregationExpr{
+				Left: &LogRange{
+					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+					Interval: 12 * time.Minute,
 				},
-				RHS: &RangeAggregationExpr{
-					Left: &LogRange{
-						Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-						Interval: 12 * time.Minute,
-					},
-					Operation: "count_over_time",
+				Operation: "count_over_time",
+			},
+			RHS: &RangeAggregationExpr{
+				Left: &LogRange{
+					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+					Interval: 12 * time.Minute,
 				},
+				Operation: "count_over_time",
 			},
 		},
-		{
-			in: `count_over_time({ foo = "bar" }[12m]) > 1`,
-			exp: &BinOpExpr{
-				Op: OpTypeGT,
-				Opts: &BinOpOptions{
-					ReturnBool:     false,
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
+	},
+	{
+		in: `count_over_time({ foo = "bar" }[12m]) > 1`,
+		exp: &BinOpExpr{
+			Op: OpTypeGT,
+			Opts: &BinOpOptions{
+				ReturnBool:     false,
+				VectorMatching: &VectorMatching{Card: CardOneToOne},
+			},
+			SampleExpr: &RangeAggregationExpr{
+				Left: &LogRange{
+					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+					Interval: 12 * time.Minute,
 				},
-				SampleExpr: &RangeAggregationExpr{
-					Left: &LogRange{
-						Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-						Interval: 12 * time.Minute,
-					},
-					Operation: "count_over_time",
+				Operation: "count_over_time",
+			},
+			RHS: &LiteralExpr{Val: 1},
+		},
+	},
+	{
+		// cannot compare metric & log queries
+		in:  `count_over_time({ foo = "bar" }[12m]) > { foo = "bar" }`,
+		err: logqlmodel.NewParseError("unexpected type for right leg of binary operation (>): *syntax.MatchersExpr", 0, 0),
+	},
+	{
+		in: `count_over_time({ foo = "bar" }[12m]) or count_over_time({ foo = "bar" }[12m]) > 1`,
+		exp: &BinOpExpr{
+			Op: OpTypeOr,
+			Opts: &BinOpOptions{
+				ReturnBool:     false,
+				VectorMatching: &VectorMatching{},
+			},
+			SampleExpr: &RangeAggregationExpr{
+				Left: &LogRange{
+					Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+					Interval: 12 * time.Minute,
 				},
-				RHS: &LiteralExpr{Val: 1},
+				Operation: "count_over_time",
 			},
-		},
-		{
-			// cannot compare metric & log queries
-			in:  `count_over_time({ foo = "bar" }[12m]) > { foo = "bar" }`,
-			err: logqlmodel.NewParseError("unexpected type for right leg of binary operation (>): *syntax.MatchersExpr", 0, 0),
-		},
-		{
-			in: `count_over_time({ foo = "bar" }[12m]) or count_over_time({ foo = "bar" }[12m]) > 1`,
-			exp: &BinOpExpr{
-				Op: OpTypeOr,
+			RHS: &BinOpExpr{
+				Op: OpTypeGT,
 				Opts: &BinOpOptions{
 					ReturnBool:     false,
-					VectorMatching: &VectorMatching{},
+					VectorMatching: &VectorMatching{Card: CardOneToOne},
 				},
 				SampleExpr: &RangeAggregationExpr{
 					Left: &LogRange{
@@ -2845,301 +2857,291 @@ func TestParse(t *testing.T) {
 					},
 					Operation: "count_over_time",
 				},
-				RHS: &BinOpExpr{
-					Op: OpTypeGT,
-					Opts: &BinOpOptions{
-						ReturnBool:     false,
-						VectorMatching: &VectorMatching{Card: CardOneToOne},
-					},
-					SampleExpr: &RangeAggregationExpr{
-						Left: &LogRange{
-							Left:     &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
-							Interval: 12 * time.Minute,
-						},
-						Operation: "count_over_time",
-					},
-					RHS: &LiteralExpr{Val: 1},
-				},
-			},
-		},
-		{
-			// test associativity
-			in:  `1 > 1 < 1`,
-			exp: &LiteralExpr{Val: 1},
-		},
-		{
-			// bool modifiers are reduced-away between two literal legs
-			in:  `1 > 1 > bool 1`,
-			exp: &LiteralExpr{Val: 0},
-		},
-		{
-			// cannot lead with bool modifier
-			in:  `bool 1 > 1 > bool 1`,
-			err: logqlmodel.NewParseError("syntax error: unexpected bool", 1, 1),
-		},
-		{
-			in:  `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m]) by (foo)`,
-			err: logqlmodel.NewParseError("grouping not allowed for sum_over_time aggregation", 0, 0),
-		},
-		{
-			in:  `sum_over_time(50,{namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
-			err: logqlmodel.NewParseError("parameter 50 not supported for operation sum_over_time", 0, 0),
-		},
-		{
-			in:  `quantile_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
-			err: logqlmodel.NewParseError("parameter required for operation quantile_over_time", 0, 0),
-		},
-		{
-			in:  `quantile_over_time(foo,{namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
-			err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER or { or (", 1, 20),
-		},
-		{
-			in:  `vector(abc)`,
-			err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER", 1, 8),
-		},
-		{
-			in:  `vector(1)`,
-			exp: &VectorExpr{Val: 1, err: nil},
-		},
-		{
-			in:  `label_replace(vector(0), "foo", "bar", "", "")`,
-			exp: mustNewLabelReplaceExpr(&VectorExpr{Val: 0, err: nil}, "foo", "bar", "", ""),
-		},
-		{
-			in: `sum(vector(0))`,
-			exp: &VectorAggregationExpr{
-				Left:      &VectorExpr{Val: 0, err: nil},
-				Grouping:  &Grouping{},
-				Params:    0,
-				Operation: "sum",
+				RHS: &LiteralExpr{Val: 1},
 			},
 		},
-		{
-			in: `{app="foo"}
+	},
+	{
+		// test associativity
+		in:  `1 > 1 < 1`,
+		exp: &LiteralExpr{Val: 1},
+	},
+	{
+		// bool modifiers are reduced-away between two literal legs
+		in:  `1 > 1 > bool 1`,
+		exp: &LiteralExpr{Val: 0},
+	},
+	{
+		// cannot lead with bool modifier
+		in:  `bool 1 > 1 > bool 1`,
+		err: logqlmodel.NewParseError("syntax error: unexpected bool", 1, 1),
+	},
+	{
+		in:  `sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m]) by (foo)`,
+		err: logqlmodel.NewParseError("grouping not allowed for sum_over_time aggregation", 0, 0),
+	},
+	{
+		in:  `sum_over_time(50,{namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
+		err: logqlmodel.NewParseError("parameter 50 not supported for operation sum_over_time", 0, 0),
+	},
+	{
+		in:  `quantile_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
+		err: logqlmodel.NewParseError("parameter required for operation quantile_over_time", 0, 0),
+	},
+	{
+		in:  `quantile_over_time(foo,{namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms| unwrap latency [5m])`,
+		err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER or { or (", 1, 20),
+	},
+	{
+		in:  `vector(abc)`,
+		err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER", 1, 8),
+	},
+	{
+		in:  `vector(1)`,
+		exp: &VectorExpr{Val: 1, err: nil},
+	},
+	{
+		in:  `label_replace(vector(0), "foo", "bar", "", "")`,
+		exp: mustNewLabelReplaceExpr(&VectorExpr{Val: 0, err: nil}, "foo", "bar", "", ""),
+	},
+	{
+		in: `sum(vector(0))`,
+		exp: &VectorAggregationExpr{
+			Left:      &VectorExpr{Val: 0, err: nil},
+			Grouping:  &Grouping{},
+			Params:    0,
+			Operation: "sum",
+		},
+	},
+	{
+		in: `{app="foo"}
 					# |= "bar"
 					| json`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLabelParserExpr(OpParserTypeJSON, ""),
-				},
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLabelParserExpr(OpParserTypeJSON, ""),
 			},
 		},
-		{
-			in: `{app="foo"}
+	},
+	{
+		in: `{app="foo"}
 					#
 					|= "bar"
 					| json`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "bar"),
-					newLabelParserExpr(OpParserTypeJSON, ""),
-				},
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "bar"),
+				newLabelParserExpr(OpParserTypeJSON, ""),
 			},
 		},
-		{
-			in:  `{app="foo"} # |= "bar" | json`,
-			exp: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-		},
-		{
-			in: `{app="foo"} | json #`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLabelParserExpr(OpParserTypeJSON, ""),
-				},
+	},
+	{
+		in:  `{app="foo"} # |= "bar" | json`,
+		exp: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+	},
+	{
+		in: `{app="foo"} | json #`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLabelParserExpr(OpParserTypeJSON, ""),
 			},
 		},
-		{
-			in:  `#{app="foo"} | json`,
-			err: logqlmodel.NewParseError("syntax error: unexpected $end", 1, 20),
-		},
-		{
-			in:  `{app="#"}`,
-			exp: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "#"}}),
-		},
-		{
-			in: `{app="foo"} |= "#"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLineFilterExpr(labels.MatchEqual, "", "#"),
-				},
+	},
+	{
+		in:  `#{app="foo"} | json`,
+		err: logqlmodel.NewParseError("syntax error: unexpected $end", 1, 20),
+	},
+	{
+		in:  `{app="#"}`,
+		exp: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "#"}}),
+	},
+	{
+		in: `{app="foo"} |= "#"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLineFilterExpr(labels.MatchEqual, "", "#"),
 			},
 		},
-		{
-			in: `{app="foo"} | bar="#"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					&LabelFilterExpr{
-						LabelFilterer: log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "bar", "#")),
-					},
+	},
+	{
+		in: `{app="foo"} | bar="#"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				&LabelFilterExpr{
+					LabelFilterer: log.NewStringLabelFilter(mustNewMatcher(labels.MatchEqual, "bar", "#")),
 				},
 			},
 		},
-		{
-			in: `{app="foo"} | json bob="top.sub[\"index\"]"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newJSONExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("bob", `top.sub["index"]`),
-					}),
-				},
+	},
+	{
+		in: `{app="foo"} | json bob="top.sub[\"index\"]"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newJSONExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("bob", `top.sub["index"]`),
+				}),
 			},
 		},
-		{
-			in: `{app="foo"} | json bob="top.params[0]"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newJSONExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("bob", `top.params[0]`),
-					}),
-				},
+	},
+	{
+		in: `{app="foo"} | json bob="top.params[0]"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newJSONExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("bob", `top.params[0]`),
+				}),
 			},
 		},
-		{
-			in: `{app="foo"} | json response_code="response.code", api_key="request.headers[\"X-API-KEY\"]"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newJSONExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("response_code", `response.code`),
-						log.NewLabelExtractionExpr("api_key", `request.headers["X-API-KEY"]`),
-					}),
-				},
+	},
+	{
+		in: `{app="foo"} | json response_code="response.code", api_key="request.headers[\"X-API-KEY\"]"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newJSONExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("response_code", `response.code`),
+					log.NewLabelExtractionExpr("api_key", `request.headers["X-API-KEY"]`),
+				}),
 			},
 		},
-		{
-			in: `{app="foo"} | json response_code, api_key="request.headers[\"X-API-KEY\"]", layer7_something_specific="layer7_something_specific"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newJSONExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("response_code", `response_code`),
-						log.NewLabelExtractionExpr("api_key", `request.headers["X-API-KEY"]`),
-						log.NewLabelExtractionExpr("layer7_something_specific", `layer7_something_specific`),
-					}),
-				},
+	},
+	{
+		in: `{app="foo"} | json response_code, api_key="request.headers[\"X-API-KEY\"]", layer7_something_specific="layer7_something_specific"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newJSONExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("response_code", `response_code`),
+					log.NewLabelExtractionExpr("api_key", `request.headers["X-API-KEY"]`),
+					log.NewLabelExtractionExpr("layer7_something_specific", `layer7_something_specific`),
+				}),
 			},
 		},
-		{
-			in: `count_over_time({ foo ="bar" } | json layer7_something_specific="layer7_something_specific" [12m])`,
-			exp: &RangeAggregationExpr{
-				Left: &LogRange{
-					Left: &PipelineExpr{
-						MultiStages: MultiStageExpr{
-							newJSONExpressionParser([]log.LabelExtractionExpr{
-								log.NewLabelExtractionExpr("layer7_something_specific", `layer7_something_specific`),
-							}),
-						},
-						Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
+	},
+	{
+		in: `count_over_time({ foo ="bar" } | json layer7_something_specific="layer7_something_specific" [12m])`,
+		exp: &RangeAggregationExpr{
+			Left: &LogRange{
+				Left: &PipelineExpr{
+					MultiStages: MultiStageExpr{
+						newJSONExpressionParser([]log.LabelExtractionExpr{
+							log.NewLabelExtractionExpr("layer7_something_specific", `layer7_something_specific`),
+						}),
 					},
-					Interval: 12 * time.Minute,
+					Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}},
 				},
-				Operation: "count_over_time",
+				Interval: 12 * time.Minute,
 			},
+			Operation: "count_over_time",
 		},
-		{
-			// binop always includes vector matching. Default is `without ()`,
-			// the zero value.
-			in: `
+	},
+	{
+		// binop always includes vector matching. Default is `without ()`,
+		// the zero value.
+		in: `
 			sum(count_over_time({foo="bar"}[5m])) or vector(1)
 			`,
-			exp: mustNewBinOpExpr(
-				OpTypeOr,
-				&BinOpOptions{
-					VectorMatching: &VectorMatching{Card: CardOneToOne},
-				},
-				mustNewVectorAggregationExpr(newRangeAggregationExpr(
-					&LogRange{
-						Left: &MatchersExpr{
-							Mts: []*labels.Matcher{
-								mustNewMatcher(labels.MatchEqual, "foo", "bar"),
-							},
+		exp: mustNewBinOpExpr(
+			OpTypeOr,
+			&BinOpOptions{
+				VectorMatching: &VectorMatching{Card: CardOneToOne},
+			},
+			mustNewVectorAggregationExpr(newRangeAggregationExpr(
+				&LogRange{
+					Left: &MatchersExpr{
+						Mts: []*labels.Matcher{
+							mustNewMatcher(labels.MatchEqual, "foo", "bar"),
 						},
-						Interval: 5 * time.Minute,
-					}, OpRangeTypeCount, nil, nil),
-					"sum",
-					&Grouping{},
-					nil,
-				),
-				NewVectorExpr("1"),
+					},
+					Interval: 5 * time.Minute,
+				}, OpRangeTypeCount, nil, nil),
+				"sum",
+				&Grouping{},
+				nil,
 			),
-		},
-		{
-			in: `{app="foo"} | logfmt message="msg"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLogfmtExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("message", `msg`),
-					}, nil),
-				},
+			NewVectorExpr("1"),
+		),
+	},
+	{
+		in: `{app="foo"} | logfmt message="msg"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLogfmtExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("message", `msg`),
+				}, nil),
 			},
 		},
-		{
-			in: `{app="foo"} | logfmt msg`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLogfmtExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("msg", `msg`),
-					}, nil),
-				},
+	},
+	{
+		in: `{app="foo"} | logfmt msg`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLogfmtExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("msg", `msg`),
+				}, nil),
 			},
 		},
-		{
-			in: `{app="foo"} | logfmt --strict msg`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLogfmtExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("msg", `msg`),
-					}, []string{OpStrict}),
-				},
+	},
+	{
+		in: `{app="foo"} | logfmt --strict msg`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLogfmtExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("msg", `msg`),
+				}, []string{OpStrict}),
 			},
 		},
-		{
-			in: `{app="foo"} | logfmt --keep-empty msg, err `,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLogfmtExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("msg", `msg`),
-						log.NewLabelExtractionExpr("err", `err`),
-					}, []string{OpKeepEmpty}),
-				},
+	},
+	{
+		in: `{app="foo"} | logfmt --keep-empty msg, err `,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLogfmtExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("msg", `msg`),
+					log.NewLabelExtractionExpr("err", `err`),
+				}, []string{OpKeepEmpty}),
 			},
 		},
-		{
-			in: `{app="foo"} | logfmt msg, err="error"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLogfmtExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("msg", `msg`),
-						log.NewLabelExtractionExpr("err", `error`),
-					}, nil),
-				},
+	},
+	{
+		in: `{app="foo"} | logfmt msg, err="error"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLogfmtExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("msg", `msg`),
+					log.NewLabelExtractionExpr("err", `error`),
+				}, nil),
 			},
 		},
-		{
-			in: `{app="foo"} | logfmt --strict --keep-empty msg="message", apiKey="api_key"`,
-			exp: &PipelineExpr{
-				Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
-				MultiStages: MultiStageExpr{
-					newLogfmtExpressionParser([]log.LabelExtractionExpr{
-						log.NewLabelExtractionExpr("msg", `message`),
-						log.NewLabelExtractionExpr("apiKey", `api_key`),
-					}, []string{OpStrict, OpKeepEmpty}),
-				},
+	},
+	{
+		in: `{app="foo"} | logfmt --strict --keep-empty msg="message", apiKey="api_key"`,
+		exp: &PipelineExpr{
+			Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}),
+			MultiStages: MultiStageExpr{
+				newLogfmtExpressionParser([]log.LabelExtractionExpr{
+					log.NewLabelExtractionExpr("msg", `message`),
+					log.NewLabelExtractionExpr("apiKey", `api_key`),
+				}, []string{OpStrict, OpKeepEmpty}),
 			},
 		},
-	} {
+	},
+}
+
+func TestParse(t *testing.T) {
+	for _, tc := range ParseTestCases {
 		t.Run(tc.in, func(t *testing.T) {
 			ast, err := ParseExpr(tc.in)
 			require.Equal(t, tc.err, err)
diff --git a/pkg/logql/syntax/serialize.go b/pkg/logql/syntax/serialize.go
new file mode 100644
index 0000000000000..02a88fd5ede28
--- /dev/null
+++ b/pkg/logql/syntax/serialize.go
@@ -0,0 +1,878 @@
+package syntax
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/prometheus/prometheus/model/labels"
+
+	"github.com/grafana/loki/pkg/logql/log"
+)
+
+// JSONSerializer is a RootVisitor that writes a LogQL AST to the embedded
+// jsoniter stream as JSON. Each Visit method emits one complete JSON object.
+type JSONSerializer struct {
+	*jsoniter.Stream
+}
+
+// NewJSONSerializer wraps the given jsoniter stream in a JSONSerializer.
+func NewJSONSerializer(s *jsoniter.Stream) *JSONSerializer {
+	return &JSONSerializer{
+		Stream: s,
+	}
+}
+
+// EncodeJSON serializes the expression e to w as JSON using a pooled
+// jsoniter stream, flushing buffered output before returning.
+func EncodeJSON(e Expr, w io.Writer) error {
+	s := jsoniter.ConfigFastest.BorrowStream(w)
+	defer jsoniter.ConfigFastest.ReturnStream(s)
+	v := NewJSONSerializer(s)
+	e.Accept(v)
+	return s.Flush()
+}
+
+// DecodeJSON parses a JSON document produced by EncodeJSON back into an
+// expression. The outer object's single key identifies the expression kind
+// and dispatches to the matching decoder.
+func DecodeJSON(raw string) (Expr, error) {
+	iter := jsoniter.ParseString(jsoniter.ConfigFastest, raw)
+
+	key := iter.ReadObject()
+	switch key {
+	case "bin":
+		return decodeBinOp(iter)
+	case "vector_agg":
+		return decodeVectorAgg(iter)
+	case "range_agg":
+		return decodeRangeAgg(iter)
+	case "literal":
+		return decodeLiteral(iter)
+	case "vector":
+		return decodeVector(iter)
+	case "label_replace":
+		return decodeLabelReplace(iter)
+	case "log_selector":
+		return decodeLogSelector(iter)
+	default:
+		return nil, fmt.Errorf("unknown expression type: %s", key)
+	}
+}
+
+var _ RootVisitor = &JSONSerializer{}
+
+// VisitBinOp serializes a binary operation as
+// {"bin": {"op": ..., "lhs": ..., "rhs": ..., "options": {...}}},
+// where "options" is emitted only when e.Opts is set.
+func (v *JSONSerializer) VisitBinOp(e *BinOpExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("bin")
+	v.WriteObjectStart()
+
+	v.WriteObjectField("op")
+	v.WriteString(e.Op)
+
+	v.WriteMore()
+	v.WriteObjectField("lhs")
+	e.SampleExpr.Accept(v)
+
+	v.WriteMore()
+	v.WriteObjectField("rhs")
+	e.RHS.Accept(v)
+
+	if e.Opts != nil {
+		v.WriteMore()
+		v.WriteObjectField("options")
+		v.WriteObjectStart()
+
+		v.WriteObjectField("return_bool")
+		v.WriteBool(e.Opts.ReturnBool)
+
+		if e.Opts.VectorMatching != nil {
+			v.WriteMore()
+			v.WriteObjectField("vector_matching")
+			encodeVectorMatching(v.Stream, e.Opts.VectorMatching)
+		}
+
+		v.WriteObjectEnd()
+		v.Flush()
+
+	}
+
+	v.WriteObjectEnd()
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitVectorAggregation serializes a vector aggregation as
+// {"vector_agg": {"params", "operation", optional "grouping", "inner"}}.
+func (v *JSONSerializer) VisitVectorAggregation(e *VectorAggregationExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("vector_agg")
+	v.WriteObjectStart()
+
+	v.WriteObjectField("params")
+	v.WriteInt(e.Params)
+
+	v.WriteMore()
+	v.WriteObjectField("operation")
+	v.WriteString(e.Operation)
+
+	if e.Grouping != nil {
+		v.WriteMore()
+		v.WriteObjectField("grouping")
+		encodeGrouping(v.Stream, e.Grouping)
+	}
+
+	v.WriteMore()
+	v.WriteObjectField("inner")
+	e.Left.Accept(v)
+
+	v.WriteObjectEnd()
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitRangeAggregation serializes a range aggregation as
+// {"range_agg": {"op", optional "grouping", optional "params", "range"}}.
+func (v *JSONSerializer) VisitRangeAggregation(e *RangeAggregationExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("range_agg")
+	v.WriteObjectStart()
+
+	v.WriteObjectField("op")
+	v.WriteString(e.Operation)
+
+	if e.Grouping != nil {
+		v.WriteMore()
+		v.WriteObjectField("grouping")
+		encodeGrouping(v.Stream, e.Grouping)
+	}
+
+	if e.Params != nil {
+		v.WriteMore()
+		v.WriteObjectField("params")
+		v.WriteFloat64(*e.Params)
+	}
+
+	// VisitLogRange writes a complete object for the "range" field; the
+	// first WriteObjectEnd below closes the "range_agg" object.
+	v.WriteMore()
+	v.WriteObjectField("range")
+	v.VisitLogRange(e.Left)
+	v.WriteObjectEnd()
+
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitLogRange serializes a log range selector. Interval and offset are
+// written as integer nanoseconds; the log selector pipeline is written as
+// its raw query string (see encodeLogSelector); unwrap is optional.
+func (v *JSONSerializer) VisitLogRange(e *LogRange) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("interval_nanos")
+	v.WriteInt64(int64(e.Interval))
+	v.WriteMore()
+	v.WriteObjectField("offset_nanos")
+	v.WriteInt64(int64(e.Offset))
+
+	// Serialize log selector pipeline as string.
+	v.WriteMore()
+	v.WriteObjectField("log_selector")
+	encodeLogSelector(v.Stream, e.Left)
+
+	if e.Unwrap != nil {
+		v.WriteMore()
+		v.WriteObjectField("unwrap")
+		encodeUnwrap(v.Stream, e.Unwrap)
+	}
+
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitLabelReplace serializes a label_replace expression as
+// {"label_replace": {"inner", "dst", "src", "replacement", "regex"}}.
+func (v *JSONSerializer) VisitLabelReplace(e *LabelReplaceExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("label_replace")
+	v.WriteObjectStart()
+
+	v.WriteObjectField("inner")
+	e.Left.Accept(v)
+
+	v.WriteMore()
+	v.WriteObjectField("dst")
+	v.WriteString(e.Dst)
+
+	v.WriteMore()
+	v.WriteObjectField("src")
+	v.WriteString(e.Src)
+
+	v.WriteMore()
+	v.WriteObjectField("replacement")
+	v.WriteString(e.Replacement)
+
+	v.WriteMore()
+	v.WriteObjectField("regex")
+	v.WriteString(e.Regex)
+
+	v.WriteObjectEnd()
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitLiteral serializes a literal number as {"literal": {"val": ...}}.
+func (v *JSONSerializer) VisitLiteral(e *LiteralExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("literal")
+	v.WriteObjectStart()
+
+	v.WriteObjectField("val")
+	v.WriteFloat64(e.Val)
+
+	v.WriteObjectEnd()
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitVector serializes a vector() expression as {"vector": {"val": ...}}.
+func (v *JSONSerializer) VisitVector(e *VectorExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("vector")
+	v.WriteObjectStart()
+
+	v.WriteObjectField("val")
+	v.WriteFloat64(e.Val)
+
+	v.WriteObjectEnd()
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitMatchers serializes a plain stream selector as its raw query string
+// under "log_selector" (see encodeLogSelector).
+func (v *JSONSerializer) VisitMatchers(e *MatchersExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("log_selector")
+	encodeLogSelector(v.Stream, e)
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// VisitPipeline serializes a log pipeline the same way as VisitMatchers:
+// the whole pipeline is stored as its raw query string.
+func (v *JSONSerializer) VisitPipeline(e *PipelineExpr) {
+	v.WriteObjectStart()
+
+	v.WriteObjectField("log_selector")
+	encodeLogSelector(v.Stream, e)
+	v.WriteObjectEnd()
+	v.Flush()
+}
+
+// Below are StageExpr visitors that we are skipping since a pipeline is
+// serialized as a string.
+func (*JSONSerializer) VisitDecolorize(*DecolorizeExpr)                     {}
+func (*JSONSerializer) VisitDropLabels(*DropLabelsExpr)                     {}
+func (*JSONSerializer) VisitJSONExpressionParser(*JSONExpressionParser)     {}
+func (*JSONSerializer) VisitKeepLabel(*KeepLabelsExpr)                      {}
+func (*JSONSerializer) VisitLabelFilter(*LabelFilterExpr)                   {}
+func (*JSONSerializer) VisitLabelFmt(*LabelFmtExpr)                         {}
+func (*JSONSerializer) VisitLabelParser(*LabelParserExpr)                   {}
+func (*JSONSerializer) VisitLineFilter(*LineFilterExpr)                     {}
+func (*JSONSerializer) VisitLineFmt(*LineFmtExpr)                           {}
+func (*JSONSerializer) VisitLogfmtExpressionParser(*LogfmtExpressionParser) {}
+func (*JSONSerializer) VisitLogfmtParser(*LogfmtParserExpr)                 {}
+
+// encodeGrouping writes a Grouping as {"without": bool, "groups": [...]}.
+func encodeGrouping(s *jsoniter.Stream, g *Grouping) {
+	s.WriteObjectStart()
+	s.WriteObjectField("without")
+	s.WriteBool(g.Without)
+
+	s.WriteMore()
+	s.WriteObjectField("groups")
+	s.WriteArrayStart()
+	for i, group := range g.Groups {
+		if i > 0 {
+			s.WriteMore()
+		}
+		s.WriteString(group)
+	}
+	s.WriteArrayEnd()
+	s.WriteObjectEnd()
+}
+
+// decodeGrouping reads the object written by encodeGrouping. Unknown fields
+// are skipped; the error return is always nil but kept for call-site symmetry.
+func decodeGrouping(iter *jsoniter.Iterator) (*Grouping, error) {
+	g := &Grouping{}
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "without":
+			g.Without = iter.ReadBool()
+		case "groups":
+			iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
+				g.Groups = append(g.Groups, iter.ReadString())
+				return true
+			})
+		}
+	}
+
+	return g, nil
+}
+
+// encodeUnwrap writes an UnwrapExpr as
+// {"identifier", "operation", "post_filterers": [label filters...]}.
+func encodeUnwrap(s *jsoniter.Stream, u *UnwrapExpr) {
+	s.WriteObjectStart()
+	s.WriteObjectField("identifier")
+	s.WriteString(u.Identifier)
+
+	s.WriteMore()
+	s.WriteObjectField("operation")
+	s.WriteString(u.Operation)
+
+	s.WriteMore()
+	s.WriteObjectField("post_filterers")
+	s.WriteArrayStart()
+	for i, filter := range u.PostFilters {
+		if i > 0 {
+			s.WriteMore()
+		}
+		encodeLabelFilter(s, filter)
+	}
+	s.WriteArrayEnd()
+
+	s.WriteObjectEnd()
+}
+
+// decodeUnwrap reads the object written by encodeUnwrap.
+func decodeUnwrap(iter *jsoniter.Iterator) *UnwrapExpr {
+	e := &UnwrapExpr{}
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "identifier":
+			e.Identifier = iter.ReadString()
+		case "operation":
+			e.Operation = iter.ReadString()
+		case "post_filterers":
+			iter.ReadArrayCB(func(i *jsoniter.Iterator) bool {
+				e.PostFilters = append(e.PostFilters, decodeLabelFilter(i))
+				return true
+			})
+		}
+	}
+
+	return e
+}
+
+// JSON field names shared by the label-filter encoders and decoders below.
+const (
+	Name  = "name"
+	Value = "value"
+	Type  = "type"
+)
+
+// encodeLabelFilter writes a label filter as a single-key object whose key
+// names the concrete filter kind ("binary", "bytes", "duration", "numeric",
+// "string", "ip"). NoopLabelFilter writes nothing; an unknown concrete type
+// also writes nothing (falls through the type switch).
+func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
+	switch concrete := filter.(type) {
+	case *log.BinaryLabelFilter:
+		s.WriteObjectStart()
+		s.WriteObjectField("binary")
+
+		s.WriteObjectStart()
+		s.WriteObjectField("left")
+		encodeLabelFilter(s, concrete.Left)
+
+		s.WriteMore()
+		s.WriteObjectField("right")
+		encodeLabelFilter(s, concrete.Right)
+		s.WriteObjectEnd()
+
+		s.WriteMore()
+		s.WriteObjectField("and")
+		s.WriteBool(concrete.And)
+
+		s.WriteObjectEnd()
+	case log.NoopLabelFilter:
+		return
+	case *log.BytesLabelFilter:
+		s.WriteObjectStart()
+		s.WriteObjectField("bytes")
+
+		s.WriteObjectStart()
+		s.WriteObjectField(Name)
+		s.WriteString(concrete.Name)
+
+		s.WriteMore()
+		s.WriteObjectField(Value)
+		s.WriteUint64(concrete.Value)
+
+		s.WriteMore()
+		s.WriteObjectField(Type)
+		s.WriteInt(int(concrete.Type))
+		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
+	case *log.DurationLabelFilter:
+		s.WriteObjectStart()
+		s.WriteObjectField("duration")
+
+		s.WriteObjectStart()
+		s.WriteObjectField(Name)
+		s.WriteString(concrete.Name)
+
+		s.WriteMore()
+		s.WriteObjectField(Value)
+		s.WriteInt64(int64(concrete.Value))
+
+		s.WriteMore()
+		s.WriteObjectField(Type)
+		s.WriteInt(int(concrete.Type))
+		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
+	case *log.NumericLabelFilter:
+		s.WriteObjectStart()
+		s.WriteObjectField("numeric")
+
+		s.WriteObjectStart()
+		s.WriteObjectField(Name)
+		s.WriteString(concrete.Name)
+
+		s.WriteMore()
+		s.WriteObjectField(Value)
+		s.WriteFloat64(concrete.Value)
+
+		s.WriteMore()
+		s.WriteObjectField(Type)
+		s.WriteInt(int(concrete.Type))
+		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
+	case *log.StringLabelFilter:
+		s.WriteObjectStart()
+		s.WriteObjectField("string")
+
+		s.WriteObjectStart()
+		if concrete.Matcher != nil {
+			s.WriteObjectField(Name)
+			s.WriteString(concrete.Name)
+
+			s.WriteMore()
+			s.WriteObjectField(Value)
+			s.WriteString(concrete.Value)
+
+			s.WriteMore()
+			s.WriteObjectField(Type)
+			s.WriteInt(int(concrete.Type))
+		}
+		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
+	case *log.LineFilterLabelFilter:
+		// Line filter label filter are encoded as string filters as
+		// well. See log.NewStringLabelFilter.
+		s.WriteObjectStart()
+		s.WriteObjectField("string")
+
+		s.WriteObjectStart()
+		if concrete.Matcher != nil {
+			s.WriteObjectField(Name)
+			s.WriteString(concrete.Name)
+
+			s.WriteMore()
+			s.WriteObjectField(Value)
+			s.WriteString(concrete.Value)
+
+			s.WriteMore()
+			s.WriteObjectField(Type)
+			s.WriteInt(int(concrete.Type))
+		}
+		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
+	case *log.IPLabelFilter:
+		s.WriteObjectStart()
+		s.WriteObjectField("ip")
+
+		s.WriteObjectStart()
+		s.WriteObjectField(Type)
+		s.WriteInt(int(concrete.Ty))
+
+		s.WriteMore()
+		s.WriteObjectField("label")
+		s.WriteString(concrete.Label)
+
+		s.WriteMore()
+		s.WriteObjectField("pattern")
+		s.WriteString(concrete.Pattern)
+
+		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
+	}
+}
+
+// decodeLabelFilter reads a single label filter object written by
+// encodeLabelFilter. The outer object has one key naming the filter kind
+// ("binary", "bytes", "duration", "numeric", "string", "ip"); an unknown
+// kind yields nil.
+func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "binary":
+			var left, right log.LabelFilterer
+			var and bool
+			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
+				switch k {
+				case "and":
+					and = iter.ReadBool()
+				case "left":
+					left = decodeLabelFilter(iter)
+				case "right":
+					right = decodeLabelFilter(iter)
+				}
+			}
+
+			return &log.BinaryLabelFilter{
+				And:   and,
+				Left:  left,
+				Right: right,
+			}
+
+		case "bytes":
+			var name string
+			var b uint64
+			var t log.LabelFilterType
+			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
+				switch k {
+				case Name:
+					name = iter.ReadString()
+				case Value:
+					b = iter.ReadUint64()
+				case Type:
+					t = log.LabelFilterType(iter.ReadInt())
+				}
+			}
+			return log.NewBytesLabelFilter(t, name, b)
+		case "duration":
+			var name string
+			var duration time.Duration
+			var t log.LabelFilterType
+			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
+				switch k {
+				case Name:
+					name = iter.ReadString()
+				case Value:
+					duration = time.Duration(iter.ReadInt64())
+				case Type:
+					t = log.LabelFilterType(iter.ReadInt())
+				}
+			}
+
+			return log.NewDurationLabelFilter(t, name, duration)
+		case "numeric":
+			var name string
+			var value float64
+			var t log.LabelFilterType
+			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
+				switch k {
+				case Name:
+					name = iter.ReadString()
+				case Value:
+					value = iter.ReadFloat64()
+				case Type:
+					t = log.LabelFilterType(iter.ReadInt())
+				}
+			}
+
+			return log.NewNumericLabelFilter(t, name, value)
+		case "string":
+
+			var name string
+			var value string
+			var t labels.MatchType
+			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
+				switch k {
+				case Name:
+					name = iter.ReadString()
+				case Value:
+					value = iter.ReadString()
+				case Type:
+					t = labels.MatchType(iter.ReadInt())
+				}
+			}
+
+			// encodeLabelFilter omits all fields when the matcher is nil,
+			// so an empty name/value round-trips to a nil matcher here.
+			var matcher *labels.Matcher
+			if name != "" && value != "" {
+				matcher = labels.MustNewMatcher(t, name, value)
+			}
+
+			return log.NewStringLabelFilter(matcher)
+
+		case "ip":
+			var label string
+			var pattern string
+			var t log.LabelFilterType
+			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
+				switch k {
+				// Note: the "pattern" field must populate pattern and
+				// "label" must populate label, mirroring encodeLabelFilter.
+				case "pattern":
+					pattern = iter.ReadString()
+				case "label":
+					label = iter.ReadString()
+				case Type:
+					t = log.LabelFilterType(iter.ReadInt())
+				}
+			}
+			return log.NewIPLabelFilter(pattern, label, t)
+		}
+	}
+
+	return nil
+}
+
+// encodeLogSelector stores a log selector (matchers or a full pipeline) as
+// its raw LogQL string under {"raw": "..."}; decodeLogSelector re-parses it.
+func encodeLogSelector(s *jsoniter.Stream, e LogSelectorExpr) {
+	s.WriteObjectStart()
+	s.WriteObjectField("raw")
+
+	s.WriteString(e.String())
+
+	s.WriteObjectEnd()
+	s.Flush()
+}
+
+// decodeLogSelector re-parses the raw query string written by
+// encodeLogSelector and asserts the result is a LogSelectorExpr.
+func decodeLogSelector(iter *jsoniter.Iterator) (LogSelectorExpr, error) {
+	var e LogSelectorExpr
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "raw":
+			raw := iter.ReadString()
+			expr, err := ParseExpr(raw)
+			if err != nil {
+				return nil, err
+			}
+
+			var ok bool
+			e, ok = expr.(LogSelectorExpr)
+
+			if !ok {
+				return nil, fmt.Errorf("unexpected expression type: want(LogSelectorExpr), got(%T)", expr)
+			}
+		}
+	}
+
+	return e, nil
+}
+
+// decodeSample dispatches on the single key of a serialized sample
+// expression, mirroring the sample-expression cases of DecodeJSON.
+func decodeSample(iter *jsoniter.Iterator) (SampleExpr, error) {
+	var expr SampleExpr
+	var err error
+	for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+		switch key {
+		case "bin":
+			expr, err = decodeBinOp(iter)
+		case "vector_agg":
+			expr, err = decodeVectorAgg(iter)
+		case "range_agg":
+			expr, err = decodeRangeAgg(iter)
+		case "literal":
+			expr, err = decodeLiteral(iter)
+		case "vector":
+			expr, err = decodeVector(iter)
+		case "label_replace":
+			expr, err = decodeLabelReplace(iter)
+		default:
+			return nil, fmt.Errorf("unknown sample expression type: %s", key)
+		}
+	}
+	return expr, err
+}
+
+// decodeBinOp reads the object written by VisitBinOp. Only the last error
+// from a nested decode is returned.
+func decodeBinOp(iter *jsoniter.Iterator) (*BinOpExpr, error) {
+	expr := &BinOpExpr{}
+	var err error
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "op":
+			expr.Op = iter.ReadString()
+		case "rhs":
+			expr.RHS, err = decodeSample(iter)
+		case "lhs":
+			expr.SampleExpr, err = decodeSample(iter)
+		case "options":
+			expr.Opts = decodeBinOpOptions(iter)
+		}
+	}
+
+	return expr, err
+}
+
+// decodeBinOpOptions reads the "options" object written by VisitBinOp.
+func decodeBinOpOptions(iter *jsoniter.Iterator) *BinOpOptions {
+	opts := &BinOpOptions{}
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "return_bool":
+			opts.ReturnBool = iter.ReadBool()
+		case "vector_matching":
+			opts.VectorMatching = decodeVectorMatching(iter)
+		}
+	}
+
+	return opts
+}
+
+// encodeVectorMatching writes a VectorMatching as
+// {"include": [...], "on": bool, "card": int, "matching_labels": [...]}.
+func encodeVectorMatching(s *jsoniter.Stream, vm *VectorMatching) {
+	s.WriteObjectStart()
+
+	s.WriteObjectField("include")
+	s.WriteArrayStart()
+	for i, l := range vm.Include {
+		if i > 0 {
+			s.WriteMore()
+		}
+		s.WriteString(l)
+	}
+	s.WriteArrayEnd()
+
+	s.WriteMore()
+	s.WriteObjectField("on")
+	s.WriteBool(vm.On)
+
+	s.WriteMore()
+	s.WriteObjectField("card")
+	s.WriteInt(int(vm.Card))
+
+	s.WriteMore()
+	s.WriteObjectField("matching_labels")
+	s.WriteArrayStart()
+	for i, l := range vm.MatchingLabels {
+		if i > 0 {
+			s.WriteMore()
+		}
+		s.WriteString(l)
+	}
+	s.WriteArrayEnd()
+
+	s.WriteObjectEnd()
+}
+
+// decodeVectorMatching reads the object written by encodeVectorMatching.
+func decodeVectorMatching(iter *jsoniter.Iterator) *VectorMatching {
+	vm := &VectorMatching{}
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "include":
+			iter.ReadArrayCB(func(i *jsoniter.Iterator) bool {
+				vm.Include = append(vm.Include, i.ReadString())
+				return true
+			})
+		case "on":
+			vm.On = iter.ReadBool()
+		case "card":
+			vm.Card = VectorMatchCardinality(iter.ReadInt())
+		case "matching_labels":
+			iter.ReadArrayCB(func(i *jsoniter.Iterator) bool {
+				vm.MatchingLabels = append(vm.MatchingLabels, i.ReadString())
+				return true
+			})
+		}
+	}
+	return vm
+}
+
+// decodeVectorAgg reads the object written by VisitVectorAggregation. Only
+// the last error from a nested decode is returned.
+func decodeVectorAgg(iter *jsoniter.Iterator) (*VectorAggregationExpr, error) {
+	expr := &VectorAggregationExpr{}
+	var err error
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "operation":
+			expr.Operation = iter.ReadString()
+		case "params":
+			expr.Params = iter.ReadInt()
+		case "grouping":
+			expr.Grouping, err = decodeGrouping(iter)
+		case "inner":
+			expr.Left, err = decodeSample(iter)
+		}
+	}
+
+	return expr, err
+}
+
+// decodeRangeAgg reads the object written by VisitRangeAggregation. A
+// present "params" field is decoded into a freshly allocated *float64.
+func decodeRangeAgg(iter *jsoniter.Iterator) (*RangeAggregationExpr, error) {
+	expr := &RangeAggregationExpr{}
+	var err error
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "op":
+			expr.Operation = iter.ReadString()
+		case "params":
+			tmp := iter.ReadFloat64()
+			expr.Params = &tmp
+		case "range":
+			expr.Left, err = decodeLogRange(iter)
+		case "grouping":
+			expr.Grouping, err = decodeGrouping(iter)
+		}
+	}
+
+	return expr, err
+}
+
+// decodeLogRange reads the object written by VisitLogRange; nanosecond
+// fields are converted back to time.Duration.
+func decodeLogRange(iter *jsoniter.Iterator) (*LogRange, error) {
+	expr := &LogRange{}
+	var err error
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "log_selector":
+			expr.Left, err = decodeLogSelector(iter)
+		case "interval_nanos":
+			expr.Interval = time.Duration(iter.ReadInt64())
+		case "offset_nanos":
+			expr.Offset = time.Duration(iter.ReadInt64())
+		case "unwrap":
+			expr.Unwrap = decodeUnwrap(iter)
+		}
+	}
+
+	return expr, err
+}
+
+// decodeLabelReplace reads the object written by VisitLabelReplace and
+// rebuilds the expression. The argument order (left, dst, replacement, src,
+// regex) matches mustNewLabelReplaceExpr's parameter order.
+func decodeLabelReplace(iter *jsoniter.Iterator) (*LabelReplaceExpr, error) {
+	var err error
+	var left SampleExpr
+	var dst, src, replacement, regex string
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "inner":
+			left, err = decodeSample(iter)
+			if err != nil {
+				return nil, err
+			}
+		case "dst":
+			dst = iter.ReadString()
+		case "src":
+			src = iter.ReadString()
+		case "replacement":
+			replacement = iter.ReadString()
+		case "regex":
+			regex = iter.ReadString()
+		}
+	}
+
+	return mustNewLabelReplaceExpr(left, dst, replacement, src, regex), nil
+}
+
+// decodeLiteral reads the object written by VisitLiteral.
+func decodeLiteral(iter *jsoniter.Iterator) (*LiteralExpr, error) {
+	expr := &LiteralExpr{}
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "val":
+			expr.Val = iter.ReadFloat64()
+		}
+	}
+
+	return expr, nil
+}
+
+// decodeVector reads the object written by VisitVector.
+func decodeVector(iter *jsoniter.Iterator) (*VectorExpr, error) {
+	expr := &VectorExpr{}
+
+	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+		switch f {
+		case "val":
+			expr.Val = iter.ReadFloat64()
+		}
+	}
+
+	return expr, nil
+}
+
+// decodeMatchers delegates to decodeLogSelector; matchers and pipelines
+// share the raw-string "log_selector" representation.
+func decodeMatchers(iter *jsoniter.Iterator) (LogSelectorExpr, error) {
+	return decodeLogSelector(iter)
+}
+
+// decodePipeline delegates to decodeLogSelector (see decodeMatchers).
+func decodePipeline(iter *jsoniter.Iterator) (LogSelectorExpr, error) {
+	return decodeLogSelector(iter)
+}
diff --git a/pkg/logql/syntax/serialize_test.go b/pkg/logql/syntax/serialize_test.go
new file mode 100644
index 0000000000000..9d48c6b8c9f38
--- /dev/null
+++ b/pkg/logql/syntax/serialize_test.go
@@ -0,0 +1,93 @@
+package syntax
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestJSONSerializationRoundTrip(t *testing.T) {
+	tests := map[string]struct {
+		query string
+	}{
+		"simple matchers": {
+			query: `{env="prod", app=~"loki.*"}`,
+		},
+		"simple aggregation": {
+			query: `count_over_time({env="prod", app=~"loki.*"}[5m])`,
+		},
+		"simple aggregation with unwrap": {
+			query: `sum_over_time({env="prod", app=~"loki.*"} | unwrap bytes[5m])`,
+		},
+		"bin op": {
+			query: `(count_over_time({env="prod", app=~"loki.*"}[5m]) >= 0)`,
+		},
+		"label filter": {
+			query: `{app="foo"} |= "bar" | json | ( latency>=250ms or ( status_code<500 , status_code>200 ) )`,
+		},
+		"regexp": {
+			query: `{env="prod", app=~"loki.*"} |~ ".*foo.*"`,
+		},
+		"vector matching": {
+			query: `(sum by (cluster)(rate({foo="bar"}[5m])) / ignoring (cluster)  count(rate({foo="bar"}[5m])))`,
+		},
+		"sum over or vector": {
+			query: `(sum(count_over_time({foo="bar"}[5m])) or vector(1.000000))`,
+		},
+		"label replace": {
+			query: `label_replace(vector(0.000000),"foo","bar","","")`,
+		},
+		"filters with bytes": {
+			query: `{app="foo"} |= "bar" | json | ( status_code <500 or ( status_code>200 , size>=2.5KiB ) )`,
+		},
+		"post filters": {
+			query: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
+				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo | __error__ !~".+"[5m]) by (namespace,instance)`,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+
+			expr, err := ParseExpr(test.query)
+			require.NoError(t, err)
+
+			var buf bytes.Buffer
+			err = EncodeJSON(expr, &buf)
+			require.NoError(t, err)
+
+			t.Log(buf.String())
+
+			actual, err := DecodeJSON(buf.String())
+			require.NoError(t, err)
+
+			//require.Equal(t, test.query, actual.String())
+			require.Equal(t, expr.Pretty(0), actual.Pretty(0))
+		})
+	}
+}
+func TestJSONSerializationParseTestCases(t *testing.T) {
+	for _, tc := range ParseTestCases {
+		if tc.err == nil {
+			t.Run(tc.in, func(t *testing.T) {
+				ast, err := ParseExpr(tc.in)
+				require.NoError(t, err)
+				if strings.Contains(tc.in, "KiB") {
+					t.Skipf("Byte roundtrip conversion is broken. '%s' vs '%s'", tc.in, ast.String())
+				}
+
+				var buf bytes.Buffer
+				err = EncodeJSON(ast, &buf)
+				require.NoError(t, err)
+				actual, err := DecodeJSON(buf.String())
+				require.NoError(t, err)
+
+				t.Log(buf.String())
+
+				require.Equal(t, tc.exp, actual)
+			})
+		}
+	}
+}
diff --git a/pkg/logql/syntax/walk.go b/pkg/logql/syntax/walk.go
index 291ec8b31036f..c528c9ca63437 100644
--- a/pkg/logql/syntax/walk.go
+++ b/pkg/logql/syntax/walk.go
@@ -1,5 +1,7 @@
 package syntax
 
+import "fmt"
+
 type WalkFn = func(e Expr)
 
 func walkAll(f WalkFn, xs ...Walkable) {
@@ -11,3 +13,120 @@ func walkAll(f WalkFn, xs ...Walkable) {
 type Walkable interface {
 	Walk(f WalkFn)
 }
+
+type AcceptVisitor interface {
+	Accept(RootVisitor)
+}
+
+type RootVisitor interface {
+	SampleExprVisitor
+	LogSelectorExprVisitor
+	StageExprVisitor
+
+	VisitLogRange(*LogRange)
+}
+
+type SampleExprVisitor interface {
+	VisitBinOp(*BinOpExpr)
+	VisitVectorAggregation(*VectorAggregationExpr)
+	VisitRangeAggregation(*RangeAggregationExpr)
+	VisitLabelReplace(*LabelReplaceExpr)
+	VisitLiteral(*LiteralExpr)
+	VisitVector(*VectorExpr)
+}
+
+type LogSelectorExprVisitor interface {
+	VisitMatchers(*MatchersExpr)
+	VisitPipeline(*PipelineExpr)
+	VisitLiteral(*LiteralExpr)
+	VisitVector(*VectorExpr)
+}
+
+type StageExprVisitor interface {
+	VisitDecolorize(*DecolorizeExpr)
+	VisitDropLabels(*DropLabelsExpr)
+	VisitJSONExpressionParser(*JSONExpressionParser)
+	VisitKeepLabel(*KeepLabelsExpr)
+	VisitLabelFilter(*LabelFilterExpr)
+	VisitLabelFmt(*LabelFmtExpr)
+	VisitLabelParser(*LabelParserExpr)
+	VisitLineFilter(*LineFilterExpr)
+	VisitLineFmt(*LineFmtExpr)
+	VisitLogfmtExpressionParser(*LogfmtExpressionParser)
+	VisitLogfmtParser(*LogfmtParserExpr)
+}
+
+func Dispatch(root Expr, v RootVisitor) error {
+	switch e := root.(type) {
+	case SampleExpr:
+		DispatchSampleExpr(e, v)
+	case LogSelectorExpr:
+		DispatchLogSelectorExpr(e, v)
+	case StageExpr:
+		DispatchStageExpr(e, v)
+	case *LogRange:
+		v.VisitLogRange(e)
+	default:
+		return fmt.Errorf("unpexpected root expression type: got (%T)", e)
+	}
+
+	return nil
+}
+
+func DispatchSampleExpr(expr SampleExpr, v SampleExprVisitor) {
+	switch e := expr.(type) {
+	case *BinOpExpr:
+		v.VisitBinOp(e)
+	case *VectorAggregationExpr:
+		v.VisitVectorAggregation(e)
+	case *RangeAggregationExpr:
+		v.VisitRangeAggregation(e)
+	case *LabelReplaceExpr:
+		v.VisitLabelReplace(e)
+	case *LiteralExpr:
+		v.VisitLiteral(e)
+	case *VectorExpr:
+		v.VisitVector(e)
+	}
+}
+
+func DispatchLogSelectorExpr(expr LogSelectorExpr, v LogSelectorExprVisitor) {
+	switch e := expr.(type) {
+	case *PipelineExpr:
+		v.VisitPipeline(e)
+	case *MatchersExpr:
+		v.VisitMatchers(e)
+	case *VectorExpr:
+		v.VisitVector(e)
+	case *LiteralExpr:
+		v.VisitLiteral(e)
+	}
+}
+
+func DispatchStageExpr(expr StageExpr, v StageExprVisitor) {
+	switch e := expr.(type) {
+	case *DecolorizeExpr:
+		v.VisitDecolorize(e)
+	case *DropLabelsExpr:
+		v.VisitDropLabels(e)
+	case *JSONExpressionParser:
+		v.VisitJSONExpressionParser(e)
+	case *KeepLabelsExpr:
+		v.VisitKeepLabel(e)
+	case *LabelFilterExpr:
+		v.VisitLabelFilter(e)
+	case *LabelFmtExpr:
+		v.VisitLabelFmt(e)
+	case *LabelParserExpr:
+		v.VisitLabelParser(e)
+	case *LineFilterExpr:
+		v.VisitLineFilter(e)
+	case *LineFmtExpr:
+		v.VisitLineFmt(e)
+	case *LogfmtExpressionParser:
+		v.VisitLogfmtExpressionParser(e)
+	case *LogfmtParserExpr:
+		v.VisitLogfmtParser(e)
+	}
+
+}

From c6f421233bd503895ff27316d759fed1815b40e7 Mon Sep 17 00:00:00 2001
From: Karsten Jeschkies 
Date: Wed, 15 Nov 2023 13:42:08 +0100
Subject: [PATCH 03/48] Increase test coverage for AST serialization to >94%.
 (#11230)

**What this PR does / why we need it**:
This is a follow-up to https://github.com/grafana/loki/pull/11123 and
fixes a few bugs discovered by increasing the test coverage.

**Checklist**
- [ ] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [ ] Documentation added
- [x] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option,
update the `deprecated-config.yaml` and `deleted-config.yaml` files
respectively in the `tools/deprecated-config-checker` directory.
[Example
PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
---
 pkg/logql/syntax/serialize.go      | 303 +++++++++++++++++------------
 pkg/logql/syntax/serialize_test.go |   9 +-
 2 files changed, 180 insertions(+), 132 deletions(-)

diff --git a/pkg/logql/syntax/serialize.go b/pkg/logql/syntax/serialize.go
index 02a88fd5ede28..2d7a1d786fda7 100644
--- a/pkg/logql/syntax/serialize.go
+++ b/pkg/logql/syntax/serialize.go
@@ -29,24 +29,73 @@ func EncodeJSON(e Expr, w io.Writer) error {
 	return s.Flush()
 }
 
+// Field names
+const (
+	Bin                 = "bin"
+	Binary              = "binary"
+	Bytes               = "bytes"
+	And                 = "and"
+	Card                = "cardinality"
+	Dst                 = "dst"
+	Duration            = "duration"
+	Groups              = "groups"
+	GroupingField       = "grouping"
+	Include             = "include"
+	Identifier          = "identifier"
+	Inner               = "inner"
+	IntervalNanos       = "interval_nanos"
+	IPField             = "ip"
+	Label               = "label"
+	LabelReplace        = "label_replace"
+	LHS                 = "lhs"
+	Literal             = "literal"
+	LogSelector         = "log_selector"
+	Name                = "name"
+	Numeric             = "numeric"
+	MatchingLabels      = "matching_labels"
+	On                  = "on"
+	Op                  = "operation"
+	Options             = "options"
+	OffsetNanos         = "offset_nanos"
+	Params              = "params"
+	Pattern             = "pattern"
+	PostFilterers       = "post_filterers"
+	Range               = "range"
+	RangeAgg            = "range_agg"
+	Raw                 = "raw"
+	RegexField          = "regex"
+	Replacement         = "replacement"
+	ReturnBool          = "return_bool"
+	RHS                 = "rhs"
+	Src                 = "src"
+	StringField         = "string"
+	Type                = "type"
+	Unwrap              = "unwrap"
+	Value               = "value"
+	Vector              = "vector"
+	VectorAgg           = "vector_agg"
+	VectorMatchingField = "vector_matching"
+	Without             = "without"
+)
+
 func DecodeJSON(raw string) (Expr, error) {
 	iter := jsoniter.ParseString(jsoniter.ConfigFastest, raw)
 
 	key := iter.ReadObject()
 	switch key {
-	case "bin":
+	case Bin:
 		return decodeBinOp(iter)
-	case "vector_agg":
+	case VectorAgg:
 		return decodeVectorAgg(iter)
-	case "range_agg":
+	case RangeAgg:
 		return decodeRangeAgg(iter)
-	case "literal":
+	case Literal:
 		return decodeLiteral(iter)
-	case "vector":
+	case Vector:
 		return decodeVector(iter)
-	case "label_replace":
+	case LabelReplace:
 		return decodeLabelReplace(iter)
-	case "log_selector":
+	case LogSelector:
 		return decodeLogSelector(iter)
 	default:
 		return nil, fmt.Errorf("unknown expression type: %s", key)
@@ -58,31 +107,31 @@ var _ RootVisitor = &JSONSerializer{}
 func (v *JSONSerializer) VisitBinOp(e *BinOpExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("bin")
+	v.WriteObjectField(Bin)
 	v.WriteObjectStart()
 
-	v.WriteObjectField("op")
+	v.WriteObjectField(Op)
 	v.WriteString(e.Op)
 
 	v.WriteMore()
-	v.WriteObjectField("lhs")
+	v.WriteObjectField(LHS)
 	e.SampleExpr.Accept(v)
 
 	v.WriteMore()
-	v.WriteObjectField("rhs")
+	v.WriteObjectField(RHS)
 	e.RHS.Accept(v)
 
 	if e.Opts != nil {
 		v.WriteMore()
-		v.WriteObjectField("options")
+		v.WriteObjectField(Options)
 		v.WriteObjectStart()
 
-		v.WriteObjectField("return_bool")
+		v.WriteObjectField(ReturnBool)
 		v.WriteBool(e.Opts.ReturnBool)
 
 		if e.Opts.VectorMatching != nil {
 			v.WriteMore()
-			v.WriteObjectField("vector_matching")
+			v.WriteObjectField(VectorMatchingField)
 			encodeVectorMatching(v.Stream, e.Opts.VectorMatching)
 		}
 
@@ -99,24 +148,24 @@ func (v *JSONSerializer) VisitBinOp(e *BinOpExpr) {
 func (v *JSONSerializer) VisitVectorAggregation(e *VectorAggregationExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("vector_agg")
+	v.WriteObjectField(VectorAgg)
 	v.WriteObjectStart()
 
-	v.WriteObjectField("params")
+	v.WriteObjectField(Params)
 	v.WriteInt(e.Params)
 
 	v.WriteMore()
-	v.WriteObjectField("operation")
+	v.WriteObjectField(Op)
 	v.WriteString(e.Operation)
 
 	if e.Grouping != nil {
 		v.WriteMore()
-		v.WriteObjectField("grouping")
+		v.WriteObjectField(GroupingField)
 		encodeGrouping(v.Stream, e.Grouping)
 	}
 
 	v.WriteMore()
-	v.WriteObjectField("inner")
+	v.WriteObjectField(Inner)
 	e.Left.Accept(v)
 
 	v.WriteObjectEnd()
@@ -127,26 +176,26 @@ func (v *JSONSerializer) VisitVectorAggregation(e *VectorAggregationExpr) {
 func (v *JSONSerializer) VisitRangeAggregation(e *RangeAggregationExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("range_agg")
+	v.WriteObjectField(RangeAgg)
 	v.WriteObjectStart()
 
-	v.WriteObjectField("op")
+	v.WriteObjectField(Op)
 	v.WriteString(e.Operation)
 
 	if e.Grouping != nil {
 		v.WriteMore()
-		v.WriteObjectField("grouping")
+		v.WriteObjectField(GroupingField)
 		encodeGrouping(v.Stream, e.Grouping)
 	}
 
 	if e.Params != nil {
 		v.WriteMore()
-		v.WriteObjectField("params")
+		v.WriteObjectField(Params)
 		v.WriteFloat64(*e.Params)
 	}
 
 	v.WriteMore()
-	v.WriteObjectField("range")
+	v.WriteObjectField(Range)
 	v.VisitLogRange(e.Left)
 	v.WriteObjectEnd()
 
@@ -157,20 +206,20 @@ func (v *JSONSerializer) VisitRangeAggregation(e *RangeAggregationExpr) {
 func (v *JSONSerializer) VisitLogRange(e *LogRange) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("interval_nanos")
+	v.WriteObjectField(IntervalNanos)
 	v.WriteInt64(int64(e.Interval))
 	v.WriteMore()
-	v.WriteObjectField("offset_nanos")
+	v.WriteObjectField(OffsetNanos)
 	v.WriteInt64(int64(e.Offset))
 
 	// Serialize log selector pipeline as string.
 	v.WriteMore()
-	v.WriteObjectField("log_selector")
+	v.WriteObjectField(LogSelector)
 	encodeLogSelector(v.Stream, e.Left)
 
 	if e.Unwrap != nil {
 		v.WriteMore()
-		v.WriteObjectField("unwrap")
+		v.WriteObjectField(Unwrap)
 		encodeUnwrap(v.Stream, e.Unwrap)
 	}
 
@@ -181,26 +230,26 @@ func (v *JSONSerializer) VisitLogRange(e *LogRange) {
 func (v *JSONSerializer) VisitLabelReplace(e *LabelReplaceExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("label_replace")
+	v.WriteObjectField(LabelReplace)
 	v.WriteObjectStart()
 
-	v.WriteObjectField("inner")
+	v.WriteObjectField(Inner)
 	e.Left.Accept(v)
 
 	v.WriteMore()
-	v.WriteObjectField("dst")
+	v.WriteObjectField(Dst)
 	v.WriteString(e.Dst)
 
 	v.WriteMore()
-	v.WriteObjectField("src")
+	v.WriteObjectField(Src)
 	v.WriteString(e.Src)
 
 	v.WriteMore()
-	v.WriteObjectField("replacement")
+	v.WriteObjectField(Replacement)
 	v.WriteString(e.Replacement)
 
 	v.WriteMore()
-	v.WriteObjectField("regex")
+	v.WriteObjectField(RegexField)
 	v.WriteString(e.Regex)
 
 	v.WriteObjectEnd()
@@ -211,10 +260,10 @@ func (v *JSONSerializer) VisitLabelReplace(e *LabelReplaceExpr) {
 func (v *JSONSerializer) VisitLiteral(e *LiteralExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("literal")
+	v.WriteObjectField(Literal)
 	v.WriteObjectStart()
 
-	v.WriteObjectField("val")
+	v.WriteObjectField(Value)
 	v.WriteFloat64(e.Val)
 
 	v.WriteObjectEnd()
@@ -225,10 +274,10 @@ func (v *JSONSerializer) VisitLiteral(e *LiteralExpr) {
 func (v *JSONSerializer) VisitVector(e *VectorExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("vector")
+	v.WriteObjectField(Vector)
 	v.WriteObjectStart()
 
-	v.WriteObjectField("val")
+	v.WriteObjectField(Value)
 	v.WriteFloat64(e.Val)
 
 	v.WriteObjectEnd()
@@ -239,7 +288,7 @@ func (v *JSONSerializer) VisitVector(e *VectorExpr) {
 func (v *JSONSerializer) VisitMatchers(e *MatchersExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("log_selector")
+	v.WriteObjectField(LogSelector)
 	encodeLogSelector(v.Stream, e)
 	v.WriteObjectEnd()
 	v.Flush()
@@ -248,7 +297,7 @@ func (v *JSONSerializer) VisitMatchers(e *MatchersExpr) {
 func (v *JSONSerializer) VisitPipeline(e *PipelineExpr) {
 	v.WriteObjectStart()
 
-	v.WriteObjectField("log_selector")
+	v.WriteObjectField(LogSelector)
 	encodeLogSelector(v.Stream, e)
 	v.WriteObjectEnd()
 	v.Flush()
@@ -270,11 +319,11 @@ func (*JSONSerializer) VisitLogfmtParser(*LogfmtParserExpr)                 {}
 
 func encodeGrouping(s *jsoniter.Stream, g *Grouping) {
 	s.WriteObjectStart()
-	s.WriteObjectField("without")
+	s.WriteObjectField(Without)
 	s.WriteBool(g.Without)
 
 	s.WriteMore()
-	s.WriteObjectField("groups")
+	s.WriteObjectField(Groups)
 	s.WriteArrayStart()
 	for i, group := range g.Groups {
 		if i > 0 {
@@ -290,9 +339,9 @@ func decodeGrouping(iter *jsoniter.Iterator) (*Grouping, error) {
 	g := &Grouping{}
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "without":
+		case Without:
 			g.Without = iter.ReadBool()
-		case "groups":
+		case Groups:
 			iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
 				g.Groups = append(g.Groups, iter.ReadString())
 				return true
@@ -305,15 +354,15 @@ func decodeGrouping(iter *jsoniter.Iterator) (*Grouping, error) {
 
 func encodeUnwrap(s *jsoniter.Stream, u *UnwrapExpr) {
 	s.WriteObjectStart()
-	s.WriteObjectField("identifier")
+	s.WriteObjectField(Identifier)
 	s.WriteString(u.Identifier)
 
 	s.WriteMore()
-	s.WriteObjectField("operation")
+	s.WriteObjectField(Op)
 	s.WriteString(u.Operation)
 
 	s.WriteMore()
-	s.WriteObjectField("post_filterers")
+	s.WriteObjectField(PostFilterers)
 	s.WriteArrayStart()
 	for i, filter := range u.PostFilters {
 		if i > 0 {
@@ -330,11 +379,11 @@ func decodeUnwrap(iter *jsoniter.Iterator) *UnwrapExpr {
 	e := &UnwrapExpr{}
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "identifier":
+		case Identifier:
 			e.Identifier = iter.ReadString()
-		case "operation":
+		case Op:
 			e.Operation = iter.ReadString()
-		case "post_filterers":
+		case PostFilterers:
 			iter.ReadArrayCB(func(i *jsoniter.Iterator) bool {
 				e.PostFilters = append(e.PostFilters, decodeLabelFilter(i))
 				return true
@@ -345,37 +394,32 @@ func decodeUnwrap(iter *jsoniter.Iterator) *UnwrapExpr {
 	return e
 }
 
-const (
-	Name  = "name"
-	Value = "value"
-	Type  = "type"
-)
-
 func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 	switch concrete := filter.(type) {
 	case *log.BinaryLabelFilter:
 		s.WriteObjectStart()
-		s.WriteObjectField("binary")
+		s.WriteObjectField(Binary)
 
 		s.WriteObjectStart()
-		s.WriteObjectField("left")
+		s.WriteObjectField(LHS)
 		encodeLabelFilter(s, concrete.Left)
 
 		s.WriteMore()
-		s.WriteObjectField("right")
+		s.WriteObjectField(RHS)
 		encodeLabelFilter(s, concrete.Right)
-		s.WriteObjectEnd()
 
 		s.WriteMore()
-		s.WriteObjectField("and")
+		s.WriteObjectField(And)
 		s.WriteBool(concrete.And)
 
 		s.WriteObjectEnd()
+
+		s.WriteObjectEnd()
 	case log.NoopLabelFilter:
 		return
 	case *log.BytesLabelFilter:
 		s.WriteObjectStart()
-		s.WriteObjectField("bytes")
+		s.WriteObjectField(Bytes)
 
 		s.WriteObjectStart()
 		s.WriteObjectField(Name)
@@ -393,7 +437,7 @@ func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 		s.WriteObjectEnd()
 	case *log.DurationLabelFilter:
 		s.WriteObjectStart()
-		s.WriteObjectField("duration")
+		s.WriteObjectField(Duration)
 
 		s.WriteObjectStart()
 		s.WriteObjectField(Name)
@@ -411,7 +455,7 @@ func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 		s.WriteObjectEnd()
 	case *log.NumericLabelFilter:
 		s.WriteObjectStart()
-		s.WriteObjectField("numeric")
+		s.WriteObjectField(Numeric)
 
 		s.WriteObjectStart()
 		s.WriteObjectField(Name)
@@ -429,7 +473,7 @@ func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 		s.WriteObjectEnd()
 	case *log.StringLabelFilter:
 		s.WriteObjectStart()
-		s.WriteObjectField("string")
+		s.WriteObjectField(StringField)
 
 		s.WriteObjectStart()
 		if concrete.Matcher != nil {
@@ -451,7 +495,7 @@ func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 		// Line filter label filter are encoded as string filters as
 		// well. See log.NewStringLabelFilter.
 		s.WriteObjectStart()
-		s.WriteObjectField("string")
+		s.WriteObjectField(StringField)
 
 		s.WriteObjectStart()
 		if concrete.Matcher != nil {
@@ -471,18 +515,18 @@ func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 		s.WriteObjectEnd()
 	case *log.IPLabelFilter:
 		s.WriteObjectStart()
-		s.WriteObjectField("ip")
+		s.WriteObjectField(IPField)
 
 		s.WriteObjectStart()
 		s.WriteObjectField(Type)
 		s.WriteInt(int(concrete.Ty))
 
 		s.WriteMore()
-		s.WriteObjectField("label")
+		s.WriteObjectField(Label)
 		s.WriteString(concrete.Label)
 
 		s.WriteMore()
-		s.WriteObjectField("pattern")
+		s.WriteObjectField(Pattern)
 		s.WriteString(concrete.Pattern)
 
 		s.WriteObjectEnd()
@@ -492,29 +536,30 @@ func encodeLabelFilter(s *jsoniter.Stream, filter log.LabelFilterer) {
 }
 
 func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
+	var filter log.LabelFilterer
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "binary":
+		case Binary:
 			var left, right log.LabelFilterer
 			var and bool
 			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
 				switch k {
-				case "and":
+				case And:
 					and = iter.ReadBool()
-				case "left":
+				case LHS:
 					left = decodeLabelFilter(iter)
-				case "right":
+				case RHS:
 					right = decodeLabelFilter(iter)
 				}
 			}
 
-			return &log.BinaryLabelFilter{
+			filter = &log.BinaryLabelFilter{
 				And:   and,
 				Left:  left,
 				Right: right,
 			}
 
-		case "bytes":
+		case Bytes:
 			var name string
 			var b uint64
 			var t log.LabelFilterType
@@ -528,8 +573,8 @@ func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
 					t = log.LabelFilterType(iter.ReadInt())
 				}
 			}
-			return log.NewBytesLabelFilter(t, name, b)
-		case "duration":
+			filter = log.NewBytesLabelFilter(t, name, b)
+		case Duration:
 			var name string
 			var duration time.Duration
 			var t log.LabelFilterType
@@ -544,8 +589,8 @@ func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
 				}
 			}
 
-			return log.NewDurationLabelFilter(t, name, duration)
-		case "numeric":
+			filter = log.NewDurationLabelFilter(t, name, duration)
+		case Numeric:
 			var name string
 			var value float64
 			var t log.LabelFilterType
@@ -560,8 +605,8 @@ func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
 				}
 			}
 
-			return log.NewNumericLabelFilter(t, name, value)
-		case "string":
+			filter = log.NewNumericLabelFilter(t, name, value)
+		case StringField:
 
 			var name string
 			var value string
@@ -582,32 +627,32 @@ func decodeLabelFilter(iter *jsoniter.Iterator) log.LabelFilterer {
 				matcher = labels.MustNewMatcher(t, name, value)
 			}
 
-			return log.NewStringLabelFilter(matcher)
+			filter = log.NewStringLabelFilter(matcher)
 
-		case "ip":
+		case IPField:
 			var label string
 			var pattern string
 			var t log.LabelFilterType
 			for k := iter.ReadObject(); k != ""; k = iter.ReadObject() {
 				switch k {
-				case "pattern":
-					label = iter.ReadString()
-				case "label":
+				case Pattern:
 					pattern = iter.ReadString()
+				case Label:
+					label = iter.ReadString()
 				case Type:
 					t = log.LabelFilterType(iter.ReadInt())
 				}
 			}
-			return log.NewIPLabelFilter(pattern, label, t)
+			filter = log.NewIPLabelFilter(pattern, label, t)
 		}
 	}
 
-	return nil
+	return filter
 }
 
 func encodeLogSelector(s *jsoniter.Stream, e LogSelectorExpr) {
 	s.WriteObjectStart()
-	s.WriteObjectField("raw")
+	s.WriteObjectField(Raw)
 
 	s.WriteString(e.String())
 
@@ -620,7 +665,7 @@ func decodeLogSelector(iter *jsoniter.Iterator) (LogSelectorExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "raw":
+		case Raw:
 			raw := iter.ReadString()
 			expr, err := ParseExpr(raw)
 			if err != nil {
@@ -644,17 +689,17 @@ func decodeSample(iter *jsoniter.Iterator) (SampleExpr, error) {
 	var err error
 	for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
 		switch key {
-		case "bin":
+		case Bin:
 			expr, err = decodeBinOp(iter)
-		case "vector_agg":
+		case VectorAgg:
 			expr, err = decodeVectorAgg(iter)
-		case "range_agg":
+		case RangeAgg:
 			expr, err = decodeRangeAgg(iter)
-		case "literal":
+		case Literal:
 			expr, err = decodeLiteral(iter)
-		case "vector":
+		case Vector:
 			expr, err = decodeVector(iter)
-		case "label_replace":
+		case LabelReplace:
 			expr, err = decodeLabelReplace(iter)
 		default:
 			return nil, fmt.Errorf("unknown sample expression type: %s", key)
@@ -669,13 +714,13 @@ func decodeBinOp(iter *jsoniter.Iterator) (*BinOpExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "op":
+		case Op:
 			expr.Op = iter.ReadString()
-		case "rhs":
+		case RHS:
 			expr.RHS, err = decodeSample(iter)
-		case "lhs":
+		case LHS:
 			expr.SampleExpr, err = decodeSample(iter)
-		case "options":
+		case Options:
 			expr.Opts = decodeBinOpOptions(iter)
 		}
 	}
@@ -687,9 +732,9 @@ func decodeBinOpOptions(iter *jsoniter.Iterator) *BinOpOptions {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "return_bool":
+		case ReturnBool:
 			opts.ReturnBool = iter.ReadBool()
-		case "vector_matching":
+		case VectorMatchingField:
 			opts.VectorMatching = decodeVectorMatching(iter)
 		}
 	}
@@ -700,7 +745,7 @@ func decodeBinOpOptions(iter *jsoniter.Iterator) *BinOpOptions {
 func encodeVectorMatching(s *jsoniter.Stream, vm *VectorMatching) {
 	s.WriteObjectStart()
 
-	s.WriteObjectField("include")
+	s.WriteObjectField(Include)
 	s.WriteArrayStart()
 	for i, l := range vm.Include {
 		if i > 0 {
@@ -711,15 +756,15 @@ func encodeVectorMatching(s *jsoniter.Stream, vm *VectorMatching) {
 	s.WriteArrayEnd()
 
 	s.WriteMore()
-	s.WriteObjectField("on")
+	s.WriteObjectField(On)
 	s.WriteBool(vm.On)
 
 	s.WriteMore()
-	s.WriteObjectField("card")
+	s.WriteObjectField(Card)
 	s.WriteInt(int(vm.Card))
 
 	s.WriteMore()
-	s.WriteObjectField("matching_labels")
+	s.WriteObjectField(MatchingLabels)
 	s.WriteArrayStart()
 	for i, l := range vm.MatchingLabels {
 		if i > 0 {
@@ -737,16 +782,16 @@ func decodeVectorMatching(iter *jsoniter.Iterator) *VectorMatching {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "include":
+		case Include:
 			iter.ReadArrayCB(func(i *jsoniter.Iterator) bool {
 				vm.Include = append(vm.Include, i.ReadString())
 				return true
 			})
-		case "on":
+		case On:
 			vm.On = iter.ReadBool()
-		case "card":
+		case Card:
 			vm.Card = VectorMatchCardinality(iter.ReadInt())
-		case "matching_labels":
+		case MatchingLabels:
 			iter.ReadArrayCB(func(i *jsoniter.Iterator) bool {
 				vm.MatchingLabels = append(vm.MatchingLabels, i.ReadString())
 				return true
@@ -762,13 +807,13 @@ func decodeVectorAgg(iter *jsoniter.Iterator) (*VectorAggregationExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "operation":
+		case Op:
 			expr.Operation = iter.ReadString()
-		case "params":
+		case Params:
 			expr.Params = iter.ReadInt()
-		case "grouping":
+		case GroupingField:
 			expr.Grouping, err = decodeGrouping(iter)
-		case "inner":
+		case Inner:
 			expr.Left, err = decodeSample(iter)
 		}
 	}
@@ -782,14 +827,14 @@ func decodeRangeAgg(iter *jsoniter.Iterator) (*RangeAggregationExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "op":
+		case Op:
 			expr.Operation = iter.ReadString()
-		case "params":
+		case Params:
 			tmp := iter.ReadFloat64()
 			expr.Params = &tmp
-		case "range":
+		case Range:
 			expr.Left, err = decodeLogRange(iter)
-		case "grouping":
+		case GroupingField:
 			expr.Grouping, err = decodeGrouping(iter)
 		}
 	}
@@ -803,13 +848,13 @@ func decodeLogRange(iter *jsoniter.Iterator) (*LogRange, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "log_selector":
+		case LogSelector:
 			expr.Left, err = decodeLogSelector(iter)
-		case "interval_nanos":
+		case IntervalNanos:
 			expr.Interval = time.Duration(iter.ReadInt64())
-		case "offset_nanos":
+		case OffsetNanos:
 			expr.Offset = time.Duration(iter.ReadInt64())
-		case "unwrap":
+		case Unwrap:
 			expr.Unwrap = decodeUnwrap(iter)
 		}
 	}
@@ -824,18 +869,18 @@ func decodeLabelReplace(iter *jsoniter.Iterator) (*LabelReplaceExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "inner":
+		case Inner:
 			left, err = decodeSample(iter)
 			if err != nil {
 				return nil, err
 			}
-		case "dst":
+		case Dst:
 			dst = iter.ReadString()
-		case "src":
+		case Src:
 			src = iter.ReadString()
-		case "replacement":
+		case Replacement:
 			replacement = iter.ReadString()
-		case "regex":
+		case RegexField:
 			regex = iter.ReadString()
 		}
 	}
@@ -848,7 +893,7 @@ func decodeLiteral(iter *jsoniter.Iterator) (*LiteralExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "val":
+		case Value:
 			expr.Val = iter.ReadFloat64()
 		}
 	}
@@ -861,7 +906,7 @@ func decodeVector(iter *jsoniter.Iterator) (*VectorExpr, error) {
 
 	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
 		switch f {
-		case "val":
+		case Value:
 			expr.Val = iter.ReadFloat64()
 		}
 	}
diff --git a/pkg/logql/syntax/serialize_test.go b/pkg/logql/syntax/serialize_test.go
index 9d48c6b8c9f38..846e3988b852b 100644
--- a/pkg/logql/syntax/serialize_test.go
+++ b/pkg/logql/syntax/serialize_test.go
@@ -42,9 +42,13 @@ func TestJSONSerializationRoundTrip(t *testing.T) {
 		"filters with bytes": {
 			query: `{app="foo"} |= "bar" | json | ( status_code <500 or ( status_code>200 , size>=2.5KiB ) )`,
 		},
-		"post filters": {
+		"post filter": {
 			query: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
-				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo | __error__ !~".+"[5m]) by (namespace,instance)`,
+				| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo
+				| __error__ !~".+"[5m]) by (namespace,instance)`,
+		},
+		"multiple post filters": {
+			query: `rate({app="foo"} | json | unwrap foo | latency >= 250ms or bytes > 42B or ( status_code < 500 and status_code > 200) or source = ip("") and user = "me" [1m])`,
 		},
 	}
 
@@ -63,7 +67,6 @@ func TestJSONSerializationRoundTrip(t *testing.T) {
 			actual, err := DecodeJSON(buf.String())
 			require.NoError(t, err)
 
-			//require.Equal(t, test.query, actual.String())
 			require.Equal(t, expr.Pretty(0), actual.Pretty(0))
 		})
 	}

From 3f0f8fa5f55209b6c683026b10e023174a5f0f24 Mon Sep 17 00:00:00 2001
From: Poyzan <31743851+poyzannur@users.noreply.github.com>
Date: Wed, 15 Nov 2023 16:57:01 +0000
Subject: [PATCH 04/48] [bloom-compactor] Move meta.json creation at the end of
 compaction cycle (#11234)

**What this PR does / why we need it**:
This is a follow-up to https://github.com/grafana/loki/pull/11115.
Instead of creating a meta file per bloom creation, create a meta file
per compaction cycle.

**Which issue(s) this PR fixes**:
Fixes #

**Special notes for your reviewer**:
---
 pkg/bloomcompactor/TODO.md           |  5 +-
 pkg/bloomcompactor/bloomcompactor.go | 77 +++++++++++++++++-----------
 2 files changed, 50 insertions(+), 32 deletions(-)

diff --git a/pkg/bloomcompactor/TODO.md b/pkg/bloomcompactor/TODO.md
index b34fc24aa967a..479f5399a350d 100644
--- a/pkg/bloomcompactor/TODO.md
+++ b/pkg/bloomcompactor/TODO.md
@@ -1,5 +1,4 @@
-* Should we consider configuring falsePosRate of sbf at runtime?
+* Adding falsePosRate of sbf into config
+* Add per-tenant bool to enable compaction
 * Use tarGz, untarGz before uploding blocks to storage
-* Return checksum from `BuildFrom`
-* Move meta creation to an outer layer, ensure one meta.json per compaction cycle.
 * Introduce back `maxLookBackPeriod` as `RejectOldSamplesMaxAge` limit in distributors
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index b517957833a94..c41d4bdfd7c91 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -503,10 +503,13 @@ func createLocalDirName(workingDir string, job Job) string {
 	return filepath.Join(workingDir, dir)
 }
 
-func CompactNewChunks(ctx context.Context, logger log.Logger, job Job, chunks []chunk.Chunk, bt *v1.BloomTokenizer, bloomShipperClient bloomshipper.Client, dst string) (err error) {
+// Compacts given list of chunks, uploads them to storage and returns a list of bloomBlocks
+func CompactNewChunks(ctx context.Context, logger log.Logger, job Job,
+	chunks []chunk.Chunk, bt *v1.BloomTokenizer,
+	bloomShipperClient bloomshipper.Client, dst string) ([]bloomshipper.Block, error) {
 	// Ensure the context has not been canceled (ie. compactor shutdown has been triggered).
 	if err := ctx.Err(); err != nil {
-		return err
+		return nil, err
 	}
 
 	// Create a bloom for this series
@@ -526,31 +529,14 @@ func CompactNewChunks(ctx context.Context, logger log.Logger, job Job, chunks []
 	blocks, err := buildBloomBlock(ctx, logger, bloomForChks, job, dst)
 	if err != nil {
 		level.Error(logger).Log("building bloomBlocks", err)
-		return
+		return nil, err
 	}
-
 	storedBlocks, err := bloomShipperClient.PutBlocks(ctx, []bloomshipper.Block{blocks})
 	if err != nil {
 		level.Error(logger).Log("putting blocks to storage", err)
-		return
-	}
-
-	storedBlockRefs := make([]bloomshipper.BlockRef, len(storedBlocks))
-	// Build and upload meta.json to storage
-	meta := bloomshipper.Meta{
-		// After successful compaction there should be no tombstones
-		Tombstones: make([]bloomshipper.BlockRef, 0),
-		Blocks:     storedBlockRefs,
-	}
-
-	// TODO move this to an outer layer, otherwise creates a meta per block
-	err = bloomShipperClient.PutMeta(ctx, meta)
-	if err != nil {
-		level.Error(logger).Log("putting meta.json to storage", err)
-		return
+		return nil, err
 	}
-
-	return nil
+	return storedBlocks, nil
 }
 
 func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, bloomShipperClient bloomshipper.Client, bt *v1.BloomTokenizer, storeClient storeClient) error {
@@ -559,23 +545,43 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job,
 		return err
 	}
 
-	// TODO call bloomShipperClient.GetMetas to get existing meta.json
+	metaSearchParams := bloomshipper.MetaSearchParams{
+		TenantID:       job.tenantID,
+		MinFingerprint: uint64(job.seriesFP),
+		MaxFingerprint: uint64(job.seriesFP),
+		StartTimestamp: int64(job.from),
+		EndTimestamp:   int64(job.through),
+	}
 	var metas []bloomshipper.Meta
+	//TODO  Configure pool for these to avoid allocations
+	var bloomBlocksRefs []bloomshipper.BlockRef
+	var tombstonedBlockRefs []bloomshipper.BlockRef
+
+	metas, err := bloomShipperClient.GetMetas(ctx, metaSearchParams)
+	if err != nil {
+		return err
+	}
 
 	if len(metas) == 0 {
 		// Get chunks data from list of chunkRefs
-		chks, err := storeClient.chunk.GetChunks(
-			ctx,
-			makeChunkRefs(job.Chunks(), job.Tenant(), job.Fingerprint()),
-		)
+		chks, err := storeClient.chunk.GetChunks(ctx, makeChunkRefs(job.Chunks(), job.Tenant(), job.Fingerprint()))
 		if err != nil {
 			return err
 		}
 
-		err = CompactNewChunks(ctx, logger, job, chks, bt, bloomShipperClient, c.cfg.WorkingDirectory)
+		storedBlocks, err := CompactNewChunks(ctx, logger, job, chks, bt, bloomShipperClient, c.cfg.WorkingDirectory)
 		if err != nil {
-			return err
+			return level.Error(logger).Log("compacting new chunks", err)
 		}
+
+		storedBlockRefs := make([]bloomshipper.BlockRef, len(storedBlocks))
+
+		for i, block := range storedBlocks {
+			storedBlockRefs[i] = block.BlockRef
+		}
+
+		// all blocks are new and active blocks
+		bloomBlocksRefs = storedBlockRefs
 	} else {
 		// TODO complete part 2 - periodic compaction for delta from previous period
 		// When already compacted metas exists
@@ -586,11 +592,24 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job,
 			for _, blockRef := range meta.Blocks {
 				uniqueIndexPaths[blockRef.IndexPath] = struct{}{}
 				// ...
+
+				// the result should return a list of active
+				// blocks and tombstoned bloom blocks.
 			}
 		}
 
 	}
 
+	// After all is done, create one meta file and upload to storage
+	meta := bloomshipper.Meta{
+		Tombstones: tombstonedBlockRefs,
+		Blocks:     bloomBlocksRefs,
+	}
+	err = bloomShipperClient.PutMeta(ctx, meta)
+	if err != nil {
+		level.Error(logger).Log("putting meta.json to storage", err)
+		return err
+	}
 	return nil
 }
 

From d22c1fd39dcbd9b507856cc01786b0b46b1e71fe Mon Sep 17 00:00:00 2001
From: Poyzan <31743851+poyzannur@users.noreply.github.com>
Date: Wed, 15 Nov 2023 21:14:27 +0000
Subject: [PATCH 05/48] [bloom-compactor] Add configs to enable compactor per
 tenant (#11235)

**What this PR does / why we need it**:
We want to control bloom compaction on a per-tenant basis. Adding configs to
enable/disable the bloom compactor.

**Which issue(s) this PR fixes**:
Fixes #

**Special notes for your reviewer**:

**Checklist**
- [x] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [x] Documentation added
- [ ] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option,
update the `deprecated-config.yaml` and `deleted-config.yaml` files
respectively in the `tools/deprecated-config-checker` directory.
[Example
PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
---
 docs/sources/configure/_index.md     | 4 ++++
 pkg/bloomcompactor/bloomcompactor.go | 6 ++++++
 pkg/bloomcompactor/config.go         | 1 +
 pkg/bloomcompactor/sharding_test.go  | 4 ++++
 pkg/validation/limits.go             | 6 ++++++
 5 files changed, 21 insertions(+)

diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index e65d025c6449e..1523f2454e19b 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -2959,6 +2959,10 @@ shard_streams:
 # CLI flag: -bloom-compactor.min-table-age
 [bloom_compactor_min_table_age:  | default = 1h]
 
+# Whether to compact chunks into bloom filters.
+# CLI flag: -bloom-compactor.enable-compaction
+[bloom_compactor_enable_compaction:  | default = false]
+
 # Allow user to send structured metadata in push payload.
 # CLI flag: -validation.allow-structured-metadata
 [allow_structured_metadata:  | default = false]
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index c41d4bdfd7c91..71dbb08380d91 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -293,6 +293,12 @@ func (c *Compactor) compactUsers(ctx context.Context, logger log.Logger, sc stor
 			return fmt.Errorf("interrupting compaction of tenants: %w", err)
 		}
 
+		// Skip tenant if compaction is not enabled
+		if !c.limits.BloomCompactorEnabled(tenant) {
+			level.Info(tenantLogger).Log("msg", "compaction disabled for tenant. Skipping.")
+			continue
+		}
+
 		// Skip this table if it is too new/old for the tenant limits.
 		now := model.Now()
 		tableMinAge := c.limits.BloomCompactorMinTableAge(tenant)
diff --git a/pkg/bloomcompactor/config.go b/pkg/bloomcompactor/config.go
index b87aa4e894918..57721850d2927 100644
--- a/pkg/bloomcompactor/config.go
+++ b/pkg/bloomcompactor/config.go
@@ -43,4 +43,5 @@ type Limits interface {
 	BloomCompactorShardSize(tenantID string) int
 	BloomCompactorMaxTableAge(tenantID string) time.Duration
 	BloomCompactorMinTableAge(tenantID string) time.Duration
+	BloomCompactorEnabled(tenantID string) bool
 }
diff --git a/pkg/bloomcompactor/sharding_test.go b/pkg/bloomcompactor/sharding_test.go
index d99f883dd3bbb..1bd7b198648e1 100644
--- a/pkg/bloomcompactor/sharding_test.go
+++ b/pkg/bloomcompactor/sharding_test.go
@@ -143,3 +143,7 @@ func (m mockLimits) BloomCompactorMaxTableAge(_ string) time.Duration {
 func (m mockLimits) BloomCompactorMinTableAge(_ string) time.Duration {
 	return 0
 }
+
+func (m mockLimits) BloomCompactorEnabled(_ string) bool {
+	return false
+}
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 823b23dd93115..0a482b2c0401f 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -185,6 +185,7 @@ type Limits struct {
 	BloomCompactorShardSize   int           `yaml:"bloom_compactor_shard_size" json:"bloom_compactor_shard_size"`
 	BloomCompactorMaxTableAge time.Duration `yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"`
 	BloomCompactorMinTableAge time.Duration `yaml:"bloom_compactor_min_table_age" json:"bloom_compactor_min_table_age"`
+	BloomCompactorEnabled     bool          `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"`
 
 	AllowStructuredMetadata           bool             `yaml:"allow_structured_metadata,omitempty" json:"allow_structured_metadata,omitempty" doc:"description=Allow user to send structured metadata in push payload."`
 	MaxStructuredMetadataSize         flagext.ByteSize `yaml:"max_structured_metadata_size" json:"max_structured_metadata_size" doc:"description=Maximum size accepted for structured metadata per log line."`
@@ -301,6 +302,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.BloomCompactorShardSize, "bloom-compactor.shard-size", 1, "The shard size defines how many bloom compactors should be used by a tenant when computing blooms. If it's set to 0, shuffle sharding is disabled.")
 	f.DurationVar(&l.BloomCompactorMaxTableAge, "bloom-compactor.max-table-age", 7*24*time.Hour, "The maximum age of a table before it is compacted. Do not compact tables older than the the configured time. Default to 7 days. 0s means no limit.")
 	f.DurationVar(&l.BloomCompactorMinTableAge, "bloom-compactor.min-table-age", 1*time.Hour, "The minimum age of a table before it is compacted. Do not compact tables newer than the the configured time. Default to 1 hour. 0s means no limit. This is useful to avoid compacting tables that will be updated with out-of-order writes.")
+	f.BoolVar(&l.BloomCompactorEnabled, "bloom-compactor.enable-compaction", false, "Whether to compact chunks into bloom filters.")
 
 	l.ShardStreams = &shardstreams.Config{}
 	l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f)
@@ -796,6 +798,10 @@ func (o *Overrides) BloomCompactorMinTableAge(userID string) time.Duration {
 	return o.getOverridesForUser(userID).BloomCompactorMinTableAge
 }
 
+func (o *Overrides) BloomCompactorEnabled(userID string) bool {
+	return o.getOverridesForUser(userID).BloomCompactorEnabled
+}
+
 func (o *Overrides) AllowStructuredMetadata(userID string) bool {
 	return o.getOverridesForUser(userID).AllowStructuredMetadata
 }

From 8328345a8bf5c73dfc49600cb5c19d24c7d965b9 Mon Sep 17 00:00:00 2001
From: keyolk 
Date: Thu, 16 Nov 2023 10:12:40 +0900
Subject: [PATCH 06/48] lambda-promtail: fix IAM policy for cloudwatch log
 stream (#10909)

---
 tools/lambda-promtail/main.tf | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/lambda-promtail/main.tf b/tools/lambda-promtail/main.tf
index 1b91fdc797c11..37f7e9ede7d0e 100644
--- a/tools/lambda-promtail/main.tf
+++ b/tools/lambda-promtail/main.tf
@@ -64,7 +64,7 @@ data "aws_iam_policy_document" "lambda_cloudwatch" {
       "logs:PutLogEvents",
     ]
     resources = [
-      aws_cloudwatch_log_group.this.arn,
+      format("%s:*", aws_cloudwatch_log_group.this.arn),
     ]
   }
 }
@@ -286,4 +286,4 @@ resource "aws_lambda_event_source_mapping" "this" {
   event_source_arn  = each.value.arn
   function_name     = aws_lambda_function.this.arn
   starting_position = "LATEST"
-}
\ No newline at end of file
+}

From 258a8b31080d919278064e56a121e744973806a5 Mon Sep 17 00:00:00 2001
From: Angus Dippenaar 
Date: Thu, 16 Nov 2023 17:30:54 +0100
Subject: [PATCH 07/48] Update nix configuration (#8452)

Fix nix configuration (mainly the failing test), and break up various binaries (e.g. logcli, promtail, loki, etc.) into their own packages.

---------

Co-authored-by: Trevor Whitney 
---
 .github/workflows/nix-ci.yaml                 | 24 +++++++--
 .golangci.yml                                 |  4 ++
 flake.lock                                    | 30 ++++++++---
 flake.nix                                     | 47 ++++++++++-------
 nix/default.nix                               | 50 ++++++++++++++++---
 nix/overlays/golangci-lint.nix                | 28 -----------
 nix/overlays/helm-docs.nix                    | 25 ----------
 nix/{ => packages}/chart-releaser.nix         |  0
 nix/{ => packages}/faillint.nix               |  0
 nix/{ => packages}/loki.nix                   | 44 +++++++++++-----
 pkg/analytics/seed_test.go                    |  2 +-
 pkg/chunkenc/memchunk_test.go                 |  2 +
 pkg/compactor/compactor_test.go               |  2 +
 pkg/distributor/instance_count_test.go        |  2 +
 pkg/iter/entry_iterator_test.go               |  1 +
 pkg/logql/range_vector_test.go                |  2 +
 pkg/loki/modules_test.go                      |  2 +
 pkg/querier/querier_test.go                   |  1 +
 pkg/storage/batch_test.go                     |  1 +
 .../client/local/boltdb_index_client_test.go  |  1 +
 tools/tsdb/tsdb-map/main_test.go              |  1 +
 21 files changed, 167 insertions(+), 102 deletions(-)
 delete mode 100644 nix/overlays/golangci-lint.nix
 delete mode 100644 nix/overlays/helm-docs.nix
 rename nix/{ => packages}/chart-releaser.nix (100%)
 rename nix/{ => packages}/faillint.nix (100%)
 rename nix/{ => packages}/loki.nix (60%)

diff --git a/.github/workflows/nix-ci.yaml b/.github/workflows/nix-ci.yaml
index c6dbbbbc7f6b9..70e418425b9c2 100644
--- a/.github/workflows/nix-ci.yaml
+++ b/.github/workflows/nix-ci.yaml
@@ -6,12 +6,30 @@ on:
       - "flake.nix"
       - "nix/**"
 jobs:
-  tests:
+  lint:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
       - uses: cachix/install-nix-action@v22
         with:
           nix_path: nixpkgs=channel:nixos-unstable
-      - run: nix run .#lint
-      - run: nix build --print-build-logs
+      - run: nix run --print-build-logs .#lint
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: cachix/install-nix-action@v22
+        with:
+          nix_path: nixpkgs=channel:nixos-unstable
+      - run: nix run --print-build-logs .#test
+  packages:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: cachix/install-nix-action@v22
+        with:
+          nix_path: nixpkgs=channel:nixos-unstable
+      - run: nix build --print-build-logs .#logcli
+      - run: nix build --print-build-logs .#loki
+      - run: nix build --print-build-logs .#loki-canary
+      - run: nix build --print-build-logs .#promtail
diff --git a/.golangci.yml b/.golangci.yml
index 8f0686445498d..fb3c1ab689d0e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -63,6 +63,10 @@ linters-settings:
           - desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
             pkg: github.com/go-kit/kit/log
 
+  misspell:
+    ignore-words:
+      - strat
+
 linters:
   enable:
     - errcheck
diff --git a/flake.lock b/flake.lock
index a1f5c07467865..06c301936e4db 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,12 +1,15 @@
 {
   "nodes": {
     "flake-utils": {
+      "inputs": {
+        "systems": "systems"
+      },
       "locked": {
-        "lastModified": 1678901627,
-        "narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
+        "lastModified": 1694529238,
+        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
+        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
         "type": "github"
       },
       "original": {
@@ -17,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1680487167,
-        "narHash": "sha256-9FNIqrxDZgSliGGN2XJJSvcDYmQbgOANaZA4UWnTdg4=",
+        "lastModified": 1699781429,
+        "narHash": "sha256-UYefjidASiLORAjIvVsUHG6WBtRhM67kTjEY4XfZOFs=",
         "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "53dad94e874c9586e71decf82d972dfb640ef044",
+        "rev": "e44462d6021bfe23dfb24b775cc7c390844f773d",
         "type": "github"
       },
       "original": {
@@ -36,6 +39,21 @@
         "flake-utils": "flake-utils",
         "nixpkgs": "nixpkgs"
       }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
     }
   },
   "root": "root",
diff --git a/flake.nix b/flake.nix
index ba0399daf11ab..631488a20694d 100644
--- a/flake.nix
+++ b/flake.nix
@@ -14,19 +14,14 @@
     in
     {
       overlays = {
-        golangci-lint = import ./nix/overlays/golangci-lint.nix;
-        helm-docs = import ./nix/overlays/helm-docs.nix;
         default = nix.overlay;
       };
     } //
     flake-utils.lib.eachDefaultSystem (system:
       let
-
         pkgs = import nixpkgs {
           inherit system;
           overlays = [
-            (import ./nix/overlays/golangci-lint.nix)
-            (import ./nix/overlays/helm-docs.nix)
             nix.overlay
           ];
           config = { allowUnfree = true; };
@@ -40,9 +35,12 @@
 
         packages = with pkgs; {
           inherit
+            logcli
             loki
+            loki-canary
             loki-helm-test
-            loki-helm-test-docker;
+            loki-helm-test-docker
+            promtail;
         };
 
         apps = {
@@ -56,21 +54,30 @@
               }/bin/lint.sh";
           };
 
+          test = {
+            type = "app";
+            program = with pkgs; "${
+                (writeShellScriptBin "test.sh" ''
+                  ${loki.overrideAttrs(old: { doCheck = true; })}/bin/loki --version
+                '')
+              }/bin/test.sh";
+          };
+
           loki = {
             type = "app";
-            program = with pkgs; "${loki.overrideAttrs(old: rec { doCheck = false; })}/bin/loki";
+            program = with pkgs; "${loki}/bin/loki";
           };
           promtail = {
             type = "app";
-            program = with pkgs; "${loki.overrideAttrs(old: rec { doCheck = false; })}/bin/promtail";
+            program = with pkgs; "${promtail}/bin/promtail";
           };
           logcli = {
             type = "app";
-            program = with pkgs; "${loki.overrideAttrs(old: rec { doCheck = false; })}/bin/logcli";
+            program = with pkgs; "${logcli}/bin/logcli";
           };
           loki-canary = {
             type = "app";
-            program = with pkgs; "${loki.overrideAttrs(old: rec { doCheck = false; })}/bin/loki-canary";
+            program = with pkgs; "${loki-canary}/bin/loki-canary";
           };
           loki-helm-test = {
             type = "app";
@@ -80,20 +87,22 @@
 
         devShell = pkgs.mkShell {
           nativeBuildInputs = with pkgs; [
+            (import ./packages/chart-releaser.nix {
+              inherit (prev) pkgs lib buildGoModule fetchFromGitHub;
+            })
+
+            chart-testing
+            faillint
             gcc
             go
-            systemd
-            yamllint
-            nixpkgs-fmt
-            statix
-            nettools
-
             golangci-lint
             gotools
             helm-docs
-            faillint
-            chart-testing
-            chart-releaser
+            nettools
+            nixpkgs-fmt
+            statix
+            systemd
+            yamllint
           ];
         };
       });
diff --git a/nix/default.nix b/nix/default.nix
index 5f07c26ee14dd..f9ae62ef73b43 100644
--- a/nix/default.nix
+++ b/nix/default.nix
@@ -30,21 +30,55 @@
         rev = gitRevision;
       };
     in
-    {
+    rec {
       inherit (loki-helm-test) loki-helm-test loki-helm-test-docker;
 
-      loki = prev.callPackage ./loki.nix {
+      loki = prev.callPackage ./packages/loki.nix {
         inherit imageTag;
         version = shortGitRevsion;
         pkgs = prev;
       };
 
-      faillint = prev.callPackage ./faillint.nix {
-        inherit (prev) lib buildGoModule fetchFromGitHub;
-      };
+      logcli = loki.overrideAttrs (oldAttrs: rec {
+        pname = "logcli";
 
-      chart-releaser = prev.callPackage ./chart-releaser.nix {
-        inherit (prev) pkgs lib buildGoModule fetchFromGitHub;
-      };
+        buildPhase = ''
+          export GOCACHE=$TMPDIR/go-cache
+          make clean logcli
+        '';
+
+        installPhase = ''
+          mkdir -p $out/bin
+          install -m755 cmd/logcli/logcli $out/bin/logcli
+        '';
+      });
+
+      loki-canary = loki.overrideAttrs (oldAttrs: rec {
+        pname = "loki-canary";
+
+        buildPhase = ''
+          export GOCACHE=$TMPDIR/go-cache
+          make clean loki-canary
+        '';
+
+        installPhase = ''
+          mkdir -p $out/bin
+          install -m755 cmd/loki-canary/loki-canary $out/bin/loki-canary
+        '';
+      });
+
+      promtail = loki.overrideAttrs (oldAttrs: rec {
+        pname = "promtail";
+
+        buildPhase = ''
+          export GOCACHE=$TMPDIR/go-cache
+          make clean promtail
+        '';
+
+        installPhase = ''
+          mkdir -p $out/bin
+          install -m755 clients/cmd/promtail/promtail $out/bin/promtail
+        '';
+      });
     };
 }
diff --git a/nix/overlays/golangci-lint.nix b/nix/overlays/golangci-lint.nix
deleted file mode 100644
index abbfacf90b0ee..0000000000000
--- a/nix/overlays/golangci-lint.nix
+++ /dev/null
@@ -1,28 +0,0 @@
-final: prev: {
-  golangci-lint = prev.callPackage
-    "${prev.path}/pkgs/development/tools/golangci-lint"
-    {
-      buildGoModule = args:
-        prev.buildGoModule (args // rec {
-          version = "1.51.2";
-
-          src = prev.fetchFromGitHub rec {
-            owner = "golangci";
-            repo = "golangci-lint";
-            rev = "v${version}";
-            sha256 = "F2rkVZ5ia9/wyTw1WIeizFnuaHoS2A8VzVOGDcshy64=";
-          };
-
-          vendorHash =
-            "sha256-JO/mRJB3gRTtBj6pW1267/xXUtalTJo0p3q5e34vqTs=";
-
-          ldflags = [
-            "-s"
-            "-w"
-            "-X main.version=${version}"
-            "-X main.commit=v${version}"
-            "-X main.date=19700101-00:00:00"
-          ];
-        });
-    };
-}
diff --git a/nix/overlays/helm-docs.nix b/nix/overlays/helm-docs.nix
deleted file mode 100644
index 8c4a851399f63..0000000000000
--- a/nix/overlays/helm-docs.nix
+++ /dev/null
@@ -1,25 +0,0 @@
-final: prev: {
-  helm-docs = prev.callPackage
-    "${prev.path}/pkgs/applications/networking/cluster/helm-docs"
-    {
-      buildGoModule = args:
-        prev.buildGoModule (args // rec {
-          version = "1.11.0";
-
-          src = prev.fetchFromGitHub {
-            owner = "norwoodj";
-            repo = "helm-docs";
-            rev = "v${version}";
-            sha256 = "sha256-476ZhjRwHlNJFkHzY8qQ7WbAUUpFNSoxXLGX9esDA/E=";
-          };
-
-          vendorSha256 = "sha256-xXwunk9rmzZEtqmSo8biuXnAjPp7fqWdQ+Kt9+Di9N8=";
-
-          ldflags = [
-            "-w"
-            "-s"
-            "-X main.version=v${version}"
-          ];
-        });
-    };
-}
diff --git a/nix/chart-releaser.nix b/nix/packages/chart-releaser.nix
similarity index 100%
rename from nix/chart-releaser.nix
rename to nix/packages/chart-releaser.nix
diff --git a/nix/faillint.nix b/nix/packages/faillint.nix
similarity index 100%
rename from nix/faillint.nix
rename to nix/packages/faillint.nix
diff --git a/nix/loki.nix b/nix/packages/loki.nix
similarity index 60%
rename from nix/loki.nix
rename to nix/packages/loki.nix
index 246a7dbb3236b..b083db6e4a7d4 100644
--- a/nix/loki.nix
+++ b/nix/packages/loki.nix
@@ -1,22 +1,38 @@
 { pkgs, version, imageTag }:
+let
+  lambda-promtail-gomod = pkgs.buildGoModule {
+    inherit version;
+    pname = "lambda-promtail";
+
+    src = ./../../tools/lambda-promtail;
+    vendorSha256 = "11yNeQb4k5/w0+r+LJOmjXUQRaWvWSXqM+zMHtMVxY8=";
+
+    doCheck = false;
+
+    installPhase = ''
+      runHook preInstall
+      cp -r --reflink=auto vendor $out
+      runHook postInstall
+    '';
+  };
+in
 pkgs.stdenv.mkDerivation {
   inherit version;
 
   pname = "loki";
 
-  src = ./..;
+  src = ./../..;
 
   buildInputs = with pkgs; [
     bash
     gcc
-    go
     git
-    bash
+    go
+    golangci-lint
+    nettools
     systemd
     yamllint
-    nettools
 
-    golangci-lint
     (import ./faillint.nix {
       inherit (pkgs) lib buildGoModule fetchFromGitHub;
     })
@@ -27,9 +43,9 @@ pkgs.stdenv.mkDerivation {
 
     substituteInPlace Makefile \
       --replace "SHELL = /usr/bin/env bash -o pipefail" "SHELL = ${bash}/bin/bash -o pipefail" \
-      --replace "IMAGE_TAG := \$(shell ./tools/image-tag)" "IMAGE_TAG := ${imageTag}" \
+      --replace "IMAGE_TAG ?= \$(shell ./tools/image-tag)" "IMAGE_TAG ?= ${imageTag}" \
       --replace "GIT_REVISION := \$(shell git rev-parse --short HEAD)" "GIT_REVISION := ${version}" \
-      --replace "GIT_BRANCH := \$(shell git rev-parse --abbrev-ref HEAD)" "GIT_BRANCH := nix" \
+      --replace "GIT_BRANCH := \$(shell git rev-parse --abbrev-ref HEAD)" "GIT_BRANCH := nix"
 
     substituteInPlace clients/cmd/fluentd/Makefile \
       --replace "SHELL    = /usr/bin/env bash -o pipefail" "SHELL = ${bash}/bin/bash -o pipefail"
@@ -37,21 +53,25 @@ pkgs.stdenv.mkDerivation {
 
   buildPhase = ''
     export GOCACHE=$TMPDIR/go-cache
-    make clean loki logcli loki-canary promtail
+    export GOMODCACHE=$TMPDIR/gomodcache
+    export GOPROXY=off
+
+    cp -r ${lambda-promtail-gomod} tools/lambda-promtail/vendor
+    make clean loki
   '';
 
-  doCheck = true;
+  doCheck = false;
   checkPhase = ''
     export GOCACHE=$TMPDIR/go-cache
+    export GOMODCACHE=$TMPDIR/gomodcache
     export GOLANGCI_LINT_CACHE=$TMPDIR/go-cache
+    export GOPROXY=off
+
     make lint test
   '';
 
   installPhase = ''
     mkdir -p $out/bin
     install -m755 cmd/loki/loki $out/bin/loki
-    install -m755 cmd/logcli/logcli $out/bin/logcli
-    install -m755 cmd/loki-canary/loki-canary $out/bin/loki-canary
-    install -m755 clients/cmd/promtail/promtail $out/bin/promtail
   '';
 }
diff --git a/pkg/analytics/seed_test.go b/pkg/analytics/seed_test.go
index 6435fabb00151..4229c508d2dfe 100644
--- a/pkg/analytics/seed_test.go
+++ b/pkg/analytics/seed_test.go
@@ -36,7 +36,7 @@ func createMemberlist(t *testing.T, port, memberID int) *memberlist.KV {
 	var cfg memberlist.KVConfig
 	flagext.DefaultValues(&cfg)
 	cfg.TCPTransport = memberlist.TCPTransportConfig{
-		BindAddrs: []string{"0.0.0.0"},
+		BindAddrs: []string{"127.0.0.1"},
 		BindPort:  0,
 	}
 	cfg.GossipInterval = 100 * time.Millisecond
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index 593b3d7de224f..151ad846ec041 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -714,6 +714,7 @@ func TestChunkStats(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	//nolint:revive
 	for it.Next() {
 	}
 	if err := it.Close(); err != nil {
@@ -742,6 +743,7 @@ func TestChunkStats(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	//nolint:revive
 	for it.Next() {
 	}
 	if err := it.Close(); err != nil {
diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go
index 79159d06d8284..854339ca6ecaf 100644
--- a/pkg/compactor/compactor_test.go
+++ b/pkg/compactor/compactor_test.go
@@ -21,6 +21,7 @@ import (
 )
 
 const indexTablePrefix = "table_"
+const localhost = "localhost"
 
 func dayFromTime(t model.Time) config.DayTime {
 	parsed, err := time.Parse("2006-01-02", t.Time().In(time.UTC).Format("2006-01-02"))
@@ -41,6 +42,7 @@ func setupTestCompactor(t *testing.T, objectClients map[config.DayTime]client.Ob
 	flagext.DefaultValues(&cfg)
 	cfg.WorkingDirectory = filepath.Join(tempDir, workingDirName)
 	cfg.RetentionEnabled = false
+	cfg.CompactorRing.InstanceAddr = localhost
 
 	if loopbackIFace, err := loki_net.LoopbackInterfaceName(); err == nil {
 		cfg.CompactorRing.InstanceInterfaceNames = append(cfg.CompactorRing.InstanceInterfaceNames, loopbackIFace)
diff --git a/pkg/distributor/instance_count_test.go b/pkg/distributor/instance_count_test.go
index 1103bc82f4a6a..92abf94c45061 100644
--- a/pkg/distributor/instance_count_test.go
+++ b/pkg/distributor/instance_count_test.go
@@ -102,6 +102,8 @@ func TestInstanceCountDelegate_CorrectlyInvokesOtherDelegates(t *testing.T) {
 	delegate = &sentryDelegate{BasicLifecyclerDelegate: delegate, calls: sentry2} // sentry delegate AFTER newHealthyInstancesDelegate
 
 	ringCfg := &RingConfig{}
+	ringCfg.InstanceAddr = "localhost"
+
 	logger := log.With(util_log.Logger, "component", "lifecycler")
 	lifecyclerCfg, err := ringCfg.ToBasicLifecyclerConfig(logger)
 	require.NoError(t, err)
diff --git a/pkg/iter/entry_iterator_test.go b/pkg/iter/entry_iterator_test.go
index 83bf6cbdd9f44..c900f898f1be4 100644
--- a/pkg/iter/entry_iterator_test.go
+++ b/pkg/iter/entry_iterator_test.go
@@ -606,6 +606,7 @@ func Test_DuplicateCount(t *testing.T) {
 			_, ctx := stats.NewContext(context.Background())
 			it := NewMergeEntryIterator(ctx, test.iters, test.direction)
 			defer it.Close()
+			//nolint:revive
 			for it.Next() {
 			}
 			require.Equal(t, test.expectedDuplicates, stats.FromContext(ctx).Result(0, 0, 0).TotalDuplicates())
diff --git a/pkg/logql/range_vector_test.go b/pkg/logql/range_vector_test.go
index 35bfdd2e72749..089bcff9e266a 100644
--- a/pkg/logql/range_vector_test.go
+++ b/pkg/logql/range_vector_test.go
@@ -368,6 +368,7 @@ func Test_RangeVectorIteratorBadLabels(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	go func() {
 		defer cancel()
+		//nolint:revive
 		for it.Next() {
 		}
 	}()
@@ -410,6 +411,7 @@ func Test_InstantQueryRangeVectorAggregations(t *testing.T) {
 				3, 1, start, end, 0)
 			require.NoError(t, err)
 
+			//nolint:revive
 			for it.Next() {
 			}
 			_, value := it.At()
diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go
index a1cb7335cdcf9..19980e2944120 100644
--- a/pkg/loki/modules_test.go
+++ b/pkg/loki/modules_test.go
@@ -396,6 +396,8 @@ func minimalWorkingConfig(t *testing.T, dir, target string, cfgTransformers ...f
 	cfg.Distributor.DistributorRing.InstanceAddr = localhost
 	cfg.IndexGateway.Mode = indexgateway.SimpleMode
 	cfg.IndexGateway.Ring.InstanceAddr = localhost
+	cfg.BloomCompactor.Ring.InstanceAddr = localhost
+	cfg.BloomGateway.Ring.InstanceAddr = localhost
 	cfg.CompactorConfig.CompactorRing.InstanceAddr = localhost
 	cfg.CompactorConfig.WorkingDirectory = path.Join(dir, "compactor")
 
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index fd21ee47d063b..4c8ee491cde61 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -426,6 +426,7 @@ func TestQuerier_IngesterMaxQueryLookback(t *testing.T) {
 			require.Nil(t, err)
 
 			// since streams are loaded lazily, force iterators to exhaust
+			//nolint:revive
 			for res.Next() {
 			}
 			queryClient.AssertExpectations(t)
diff --git a/pkg/storage/batch_test.go b/pkg/storage/batch_test.go
index 0e39edd86b625..1df906f7dcf2b 100644
--- a/pkg/storage/batch_test.go
+++ b/pkg/storage/batch_test.go
@@ -1742,6 +1742,7 @@ func TestBatchCancel(t *testing.T) {
 	it, err := newLogBatchIterator(ctx, s, NilMetrics, chunks, 1, newMatchers(fooLabels.String()), log.NewNoopPipeline(), logproto.FORWARD, from, time.Now(), nil)
 	require.NoError(t, err)
 	defer require.NoError(t, it.Close())
+	//nolint:revive
 	for it.Next() {
 	}
 	require.Equal(t, context.Canceled, it.Error())
diff --git a/pkg/storage/chunk/client/local/boltdb_index_client_test.go b/pkg/storage/chunk/client/local/boltdb_index_client_test.go
index 6e23bdbbaade3..2b26b5cc32cf9 100644
--- a/pkg/storage/chunk/client/local/boltdb_index_client_test.go
+++ b/pkg/storage/chunk/client/local/boltdb_index_client_test.go
@@ -299,6 +299,7 @@ func Benchmark_Query(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		err = indexClient.query(context.Background(), entry, func(_ index.Query, read index.ReadBatchResult) bool {
 			iter := read.Iterator()
+			//nolint:revive
 			for iter.Next() {
 			}
 			return true
diff --git a/tools/tsdb/tsdb-map/main_test.go b/tools/tsdb/tsdb-map/main_test.go
index 480c723431e22..bf8c802db8456 100644
--- a/tools/tsdb/tsdb-map/main_test.go
+++ b/tools/tsdb/tsdb-map/main_test.go
@@ -73,6 +73,7 @@ func BenchmarkQuery_PostingsForMatchers(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				p, _ := tsdb.PostingsForMatchers(reader, nil, bm.matchers...)
 
+				//nolint:revive
 				for p.Next() {
 				}
 			}

From 5c5948367ac19ed13b500ccf397ad89cf138c605 Mon Sep 17 00:00:00 2001
From: Trevor Whitney 
Date: Thu, 16 Nov 2023 11:05:03 -0700
Subject: [PATCH 08/48] Do not run snyk pr comment workflow on forks (#11240)

Only run the snyk pr comment workflow on PRs from branches, not on forks. We can't run this `on: pull_request_target` because it needs access to the `SNYK_TOKEN` secret, and when run `on: pull_request`, forks don't have permissions to comment on the PR (because they don't get the `GITHUB_TOKEN` secret).
---
 .github/workflows/snyk-pr-comment.yml         | 47 ++++++++++++++++++
 ...rability-scan.yml => trivy-pr-comment.yml} | 48 +------------------
 2 files changed, 49 insertions(+), 46 deletions(-)
 create mode 100644 .github/workflows/snyk-pr-comment.yml
 rename .github/workflows/{vulnerability-scan.yml => trivy-pr-comment.yml} (51%)

diff --git a/.github/workflows/snyk-pr-comment.yml b/.github/workflows/snyk-pr-comment.yml
new file mode 100644
index 0000000000000..9eb86f069fc0c
--- /dev/null
+++ b/.github/workflows/snyk-pr-comment.yml
@@ -0,0 +1,47 @@
+name: PR Vulnerability Scan
+on: pull_request
+
+permissions:
+  pull-requests: write
+  issues: write
+
+jobs:
+  snyk:
+    name: Snyk Scan
+    runs-on: ubuntu-latest
+    if: ${{ !github.event.pull_request.head.repo.fork }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@master
+      - name: Run Snyk to check for vulnerabilities
+        uses: snyk/actions/golang@master
+        continue-on-error: true # To make sure that PR comment is made
+        env:
+          SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
+        with:
+          command: test
+          args: --severity-threshold=high --json-file-output=snyk.json
+
+      - name: Prepare Snyk message
+        run: |
+          echo "Snyk scan found the following vulnerabilities:" > snyk.txt
+
+      - name: Format Snyk Message
+        uses: sergeysova/jq-action@v2
+        continue-on-error: true
+        with:
+          cmd: jq -r  '.vulnerabilities[] | "* **\(.severity)** - [\(.identifiers.CVE[0])] \(.title) in `\(.moduleName)` v\(.version). Fixed in \(.fixedIn)"' snyk.json >> snyk.txt
+
+      - name: Determine whether to comment
+        continue-on-error: true
+        id: should-comment
+        run: |
+          if [[ $(wc -l < snyk.txt) -gt 1 ]]; then exit 0; fi
+          exit 1
+
+      - name: Comment on PR with Snyk scan results
+        uses: mshick/add-pr-comment@v2
+        if: ${{ steps.should-comment.outcome == 'success' }}
+        with:
+          message-id: snyk-${{ github.event.number }}
+          message-path: snyk.txt
diff --git a/.github/workflows/vulnerability-scan.yml b/.github/workflows/trivy-pr-comment.yml
similarity index 51%
rename from .github/workflows/vulnerability-scan.yml
rename to .github/workflows/trivy-pr-comment.yml
index ddbb926c07057..c57264a790bcd 100644
--- a/.github/workflows/vulnerability-scan.yml
+++ b/.github/workflows/trivy-pr-comment.yml
@@ -1,58 +1,14 @@
 name: PR Vulnerability Scan
-on: pull_request
+on: pull_request_target
 
 permissions:
   pull-requests: write
-  contents: write
+  issues: write
 
 jobs:
-  snyk:
-    name: Snyk Scan
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@master
-      - name: Run Snyk to check for vulnerabilities
-        uses: snyk/actions/golang@master
-        continue-on-error: true # To make sure that PR comment is made
-        env:
-          SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
-        with:
-          command: test
-          args: --severity-threshold=high --json-file-output=snyk.json
-
-      - name: Prepare Snyk message
-        run: |
-          echo "Snyk scan found the following vulnerabilities:" > snyk.txt
-
-      - name: Format Snyk Message
-        uses: sergeysova/jq-action@v2
-        continue-on-error: true
-        with:
-          cmd: jq -r  '.vulnerabilities[] | "* **\(.severity)** - [\(.identifiers.CVE[0])] \(.title) in `\(.moduleName)` v\(.version). Fixed in \(.fixedIn)"' snyk.json >> snyk.txt
-
-      - name: Determine whether to comment
-        continue-on-error: true
-        id: should-comment
-        run: |
-          if [[ $(wc -l < snyk.txt) -gt 1 ]]; then exit 0; fi
-          exit 1
-
-      - name: Comment on PR with Snyk scan results
-        uses: mshick/add-pr-comment@v2
-        if: ${{ steps.should-comment.outcome == 'success' }}
-        with:
-          message-id: snyk-${{ github.event.number }}
-          message-path: snyk.txt
   trivy:
     name: Trivy Scan
     runs-on: ubuntu-20.04
-    permissions:
-      issues: write
-      pull-requests: write
     steps:
       - name: Checkout code
         uses: actions/checkout@v3

From 4309add1b29a8e3e1bdf42f0d2642c817d2acd6f Mon Sep 17 00:00:00 2001
From: Dylan Guedes 
Date: Thu, 16 Nov 2023 16:59:51 -0300
Subject: [PATCH 09/48] Fix per-pod panel unit (from 'ms' to 's') (#11245)

**What this PR does / why we need it**:
Change per-pod latency panel unit from 'ms' to 's' (the metric is in the 's' unit)
---
 .../dashboards/loki-reads.json                       |  4 ++--
 .../loki-mixin-compiled/dashboards/loki-reads.json   | 12 ++++++------
 .../loki-mixin/dashboards/loki-reads.libsonnet       |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
index 8c0e5e8f63b77..1c563628fdabc 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
@@ -219,7 +219,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 3,
@@ -495,7 +495,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 6,
diff --git a/production/loki-mixin-compiled/dashboards/loki-reads.json b/production/loki-mixin-compiled/dashboards/loki-reads.json
index 54bdb2870da1c..a35120412a3a1 100644
--- a/production/loki-mixin-compiled/dashboards/loki-reads.json
+++ b/production/loki-mixin-compiled/dashboards/loki-reads.json
@@ -219,7 +219,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 3,
@@ -495,7 +495,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 6,
@@ -771,7 +771,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 9,
@@ -1047,7 +1047,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 12,
@@ -1323,7 +1323,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 15,
@@ -1599,7 +1599,7 @@
                            }
                         }
                      },
-                     "unit": "ms"
+                     "unit": "s"
                   },
                   "fill": 1,
                   "id": 18,
diff --git a/production/loki-mixin/dashboards/loki-reads.libsonnet b/production/loki-mixin/dashboards/loki-reads.libsonnet
index a1d68a15637c1..2e9de3d88195f 100644
--- a/production/loki-mixin/dashboards/loki-reads.libsonnet
+++ b/production/loki-mixin/dashboards/loki-reads.libsonnet
@@ -38,7 +38,7 @@ local utils = import 'mixin-utils/utils.libsonnet';
               },
             },
           },
-          unit: 'ms',
+          unit: 's',
         },
       },
 

From 8d0fc171cca5a35be77d3234b0449dc27e90c7df Mon Sep 17 00:00:00 2001
From: Periklis Tsirakidis 
Date: Fri, 17 Nov 2023 14:14:13 +0100
Subject: [PATCH 10/48] operator: Update dependencies and dev tools (#11232)

---
 operator/.bingo/Variables.mk                  |  12 +-
 operator/.bingo/golangci-lint.mod             |   2 +-
 operator/.bingo/golangci-lint.sum             |  66 ++++
 operator/.bingo/operator-sdk.mod              |   2 +-
 operator/.bingo/operator-sdk.sum              |  24 ++
 operator/.bingo/variables.env                 |   4 +-
 operator/CHANGELOG.md                         |   1 +
 .../apis/config/v1/projectconfig_types.go     |   2 +-
 .../loki/alertingrule_controller.go           |   3 +-
 .../controllers/loki/lokistack_controller.go  |  18 +-
 .../loki/lokistack_controller_test.go         |  11 +-
 .../lokistack_zone_labeling_controller.go     |   3 +-
 ...lokistack_zone_labeling_controller_test.go |   6 +-
 .../loki/recordingrule_controller.go          |   3 +-
 operator/docs/operator/feature-gates.md       |   2 +-
 operator/go.mod                               |  42 +--
 operator/go.sum                               | 319 +++---------------
 operator/internal/external/k8s/builder.go     |   7 +-
 operator/internal/external/k8s/client.go      |   3 +
 .../external/k8s/k8sfakes/fake_builder.go     |  13 +-
 .../external/k8s/k8sfakes/fake_client.go      | 159 +++++++++
 .../internal/storage/ca_configmap_test.go     |   3 +-
 operator/internal/status/lokistack.go         |   2 +-
 operator/internal/validation/alertingrule.go  |  16 +-
 .../internal/validation/alertingrule_test.go  |   4 +-
 operator/internal/validation/lokistack.go     |  16 +-
 .../internal/validation/lokistack_test.go     |   4 +-
 operator/internal/validation/recordingrule.go |  16 +-
 .../internal/validation/recordingrule_test.go |   4 +-
 operator/internal/validation/rulerconfig.go   |  16 +-
 .../internal/validation/rulerconfig_test.go   |   4 +-
 operator/main.go                              |   2 +-
 32 files changed, 406 insertions(+), 383 deletions(-)

diff --git a/operator/.bingo/Variables.mk b/operator/.bingo/Variables.mk
index 17f66a8c0ac19..9af26d2119f3d 100644
--- a/operator/.bingo/Variables.mk
+++ b/operator/.bingo/Variables.mk
@@ -41,11 +41,11 @@ $(GOFUMPT): $(BINGO_DIR)/gofumpt.mod
 	@echo "(re)installing $(GOBIN)/gofumpt-v0.5.0"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gofumpt.mod -o=$(GOBIN)/gofumpt-v0.5.0 "mvdan.cc/gofumpt"
 
-GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.54.2
+GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.55.2
 $(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/golangci-lint-v1.54.2"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.54.2 "github.com/golangci/golangci-lint/cmd/golangci-lint"
+	@echo "(re)installing $(GOBIN)/golangci-lint-v1.55.2"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.55.2 "github.com/golangci/golangci-lint/cmd/golangci-lint"
 
 HUGO := $(GOBIN)/hugo-v0.80.0
 $(HUGO): $(BINGO_DIR)/hugo.mod
@@ -83,11 +83,11 @@ $(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
 	@echo "(re)installing $(GOBIN)/kustomize-v4.5.7"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v4.5.7 "sigs.k8s.io/kustomize/kustomize/v4"
 
-OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.31.0
+OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.32.0
 $(OPERATOR_SDK): $(BINGO_DIR)/operator-sdk.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/operator-sdk-v1.31.0"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.31.0 "github.com/operator-framework/operator-sdk/cmd/operator-sdk"
+	@echo "(re)installing $(GOBIN)/operator-sdk-v1.32.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.32.0 "github.com/operator-framework/operator-sdk/cmd/operator-sdk"
 
 PROMTOOL := $(GOBIN)/promtool-v0.47.1
 $(PROMTOOL): $(BINGO_DIR)/promtool.mod
diff --git a/operator/.bingo/golangci-lint.mod b/operator/.bingo/golangci-lint.mod
index 2be0691976f6b..8b166b49866f6 100644
--- a/operator/.bingo/golangci-lint.mod
+++ b/operator/.bingo/golangci-lint.mod
@@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
 
 go 1.20
 
-require github.com/golangci/golangci-lint v1.54.2 // cmd/golangci-lint
+require github.com/golangci/golangci-lint v1.55.2 // cmd/golangci-lint
diff --git a/operator/.bingo/golangci-lint.sum b/operator/.bingo/golangci-lint.sum
index 47bdd1f5071e5..e8fe345ebd33c 100644
--- a/operator/.bingo/golangci-lint.sum
+++ b/operator/.bingo/golangci-lint.sum
@@ -70,6 +70,8 @@ github.com/4meepo/tagalign v1.2.2 h1:kQeUTkFTaBRtd/7jm8OKJl9iHk0gAO+TDFPHGSna0aw
 github.com/4meepo/tagalign v1.2.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
 github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI=
 github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
+github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw=
+github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
 github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q=
 github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk=
 github.com/Abirdcfly/dupword v0.0.9 h1:MxprGjKq3yDBICXDgEEsyGirIXfMYXkLNT/agPsE1tk=
@@ -78,6 +80,8 @@ github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16
 github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
 github.com/Abirdcfly/dupword v0.0.12 h1:56NnOyrXzChj07BDFjeRA+IUzSz01jmzEq+G4kEgFhc=
 github.com/Abirdcfly/dupword v0.0.12/go.mod h1:+us/TGct/nI9Ndcbcp3rgNcQzctTj68pq7TcgNpLfdI=
+github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo=
+github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y=
 github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako=
 github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU=
 github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls=
@@ -90,6 +94,8 @@ github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l
 github.com/Antonboom/nilnil v0.1.5/go.mod h1:I24toVuBKhfP5teihGWctrRiPbRKHwZIFOvc6v3HZXk=
 github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow=
 github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ=
+github.com/Antonboom/testifylint v0.2.3 h1:MFq9zyL+rIVpsvLX4vDPLojgN7qODzWsrnftNX2Qh60=
+github.com/Antonboom/testifylint v0.2.3/go.mod h1:IYaXaOX9NbfAyO+Y04nfjGI8wDemC1rUyM/cYolz018=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
@@ -122,6 +128,8 @@ github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZ
 github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
 github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
 github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ=
+github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8=
+github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -181,16 +189,24 @@ github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI=
 github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A=
 github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8=
 github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
+github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY=
+github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ=
 github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw=
 github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU=
 github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ=
 github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
+github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA=
+github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U=
 github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
 github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
 github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4=
 github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
+github.com/butuzov/ireturn v0.2.2 h1:jWI36dxXwVrI+RnXDwux2IZOewpmfv930OuIRfaBUJ0=
+github.com/butuzov/ireturn v0.2.2/go.mod h1:RfGHUvvAuFFxoHKf4Z8Yxuh6OjlCw1KvR2zM1NFHeBk=
 github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
 github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
+github.com/catenacyber/perfsprint v0.2.0 h1:azOocHLscPjqXVJ7Mf14Zjlkn4uNua0+Hcg1wTR6vUo=
+github.com/catenacyber/perfsprint v0.2.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
 github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4=
 github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -211,6 +227,8 @@ github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 h1:cy5GCEZLUCshCGC
 github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348/go.mod h1:f/miWtG3SSuTxKsNK3o58H1xl+XV6ZIfbC6p7lPPB8U=
 github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0=
 github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
+github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
+github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -256,6 +274,8 @@ github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0=
 github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
 github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A=
 github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
+github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y=
+github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
 github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -305,6 +325,8 @@ github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3n
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghostiam/protogetter v0.2.3 h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbBVwMjw=
+github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4=
 github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw=
 github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo=
 github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY=
@@ -422,6 +444,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPSh
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
 github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
 github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
 github.com/golangci/golangci-lint v1.47.2 h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw=
 github.com/golangci/golangci-lint v1.47.2/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg=
 github.com/golangci/golangci-lint v1.50.0 h1:+Xmyt8rKLauNLp2gzcxKMN8VNGqGc5Avc2ZLTwIOpEA=
@@ -434,6 +458,8 @@ github.com/golangci/golangci-lint v1.53.3 h1:CUcRafczT4t1F+mvdkUm6KuOpxUZTl0yWN/
 github.com/golangci/golangci-lint v1.53.3/go.mod h1:W4Gg3ONq6p3Jl+0s/h9Gr0j7yEgHJWWZO2bHl2tBUXM=
 github.com/golangci/golangci-lint v1.54.2 h1:oR9zxfWYxt7hFqk6+fw6Enr+E7F0SN2nqHhJYyIb0yo=
 github.com/golangci/golangci-lint v1.54.2/go.mod h1:vnsaCTPKCI2wreL9tv7RkHDwUrz3htLjed6+6UsvcwU=
+github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8=
+github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
 github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
@@ -447,6 +473,8 @@ github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Se
 github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI=
 github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
 github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
+github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU=
+github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA=
 github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
 github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -470,6 +498,8 @@ github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -599,6 +629,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
 github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA=
+github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
 github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
 github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
 github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
@@ -691,6 +723,8 @@ github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCE
 github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
 github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/macabu/inamedparam v0.1.2 h1:RR5cnayM6Q7cDhQol32DE2BGAPGMnffJ31LFE+UklaU=
+github.com/macabu/inamedparam v0.1.2/go.mod h1:Xg25QvY7IBRl1KLPV9Rbml8JOMZtF/iAkNkmV7eQgjw=
 github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@@ -746,6 +780,8 @@ github.com/mgechev/revive v1.2.5 h1:UF9AR8pOAuwNmhXj2odp4mxv9Nx2qUIwVz8ZsU+Mbec=
 github.com/mgechev/revive v1.2.5/go.mod h1:nFOXent79jMTISAfOAasKfy0Z2Ejq0WX7Qn/KAdYopI=
 github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U=
 github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0=
+github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc=
+github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
@@ -806,6 +842,8 @@ github.com/nunnatsa/ginkgolinter v0.12.1 h1:vwOqb5Nu05OikTXqhvLdHCGcx5uthIYIl0t7
 github.com/nunnatsa/ginkgolinter v0.12.1/go.mod h1:AK8Ab1PypVrcGUusuKD8RDcl2KgsIwvNaaxAlyHSzso=
 github.com/nunnatsa/ginkgolinter v0.13.5 h1:fOsPB4CEZOPkyMqF4B9hoqOpooFWU7vWSVkCSscVpgU=
 github.com/nunnatsa/ginkgolinter v0.13.5/go.mod h1:OBHy4536xtuX3102NM63XRtOyxqZOO02chsaeDWXVO8=
+github.com/nunnatsa/ginkgolinter v0.14.1 h1:khx0CqR5U4ghsscjJ+lZVthp3zjIFytRXPTaQ/TMiyA=
+github.com/nunnatsa/ginkgolinter v0.14.1/go.mod h1:nY0pafUSst7v7F637e7fymaMlQqI9c0Wka2fGsDkzWg=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
@@ -863,6 +901,8 @@ github.com/polyfloyd/go-errorlint v1.4.2 h1:CU+O4181IxFDdPH6t/HT7IiDj1I7zxNi1RIU
 github.com/polyfloyd/go-errorlint v1.4.2/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
 github.com/polyfloyd/go-errorlint v1.4.4 h1:A9gytp+p6TYqeALTYRoxJESYP8wJRETRX2xzGWFsEBU=
 github.com/polyfloyd/go-errorlint v1.4.4/go.mod h1:ry5NqF7l9Q77V+XqAfUg1zfryrEtyac3G5+WVpIK0xU=
+github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI=
+github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
@@ -939,6 +979,8 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8
 github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
 github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
 github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
+github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
+github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
 github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
@@ -964,6 +1006,8 @@ github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyO
 github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI=
 github.com/securego/gosec/v2 v2.17.0 h1:ZpAStTDKY39insEG9OH6kV3IkhQZPTq9a9eGOLOjcdI=
 github.com/securego/gosec/v2 v2.17.0/go.mod h1:lt+mgC91VSmriVoJLentrMkRCYs+HLTBnUFUBuhV2hc=
+github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I=
+github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
@@ -1081,6 +1125,8 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
 github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
 github.com/tetafro/godot v1.4.14 h1:ScO641OHpf9UpHPk8fCknSuXNMpi4iFlwuWoBs3L+1s=
 github.com/tetafro/godot v1.4.14/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
+github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM=
+github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=
 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
 github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e h1:MV6KaVu/hzByHP0UvJ4HcMGE/8a6A4Rggc/0wx2AvJo=
@@ -1121,6 +1167,8 @@ github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842
 github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
 github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo=
 github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI=
+github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
 github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
@@ -1128,6 +1176,8 @@ github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7Fw
 github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
 github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
 github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
+github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
+github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -1158,6 +1208,10 @@ gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0=
 gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
 gitlab.com/bosi/decorder v0.4.0 h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY=
 gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg=
+gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4=
+gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA=
+go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM=
+go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
@@ -1283,6 +1337,8 @@ golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
 golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1392,6 +1448,8 @@ golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
 golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1514,6 +1572,8 @@ golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1541,6 +1601,8 @@ golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1660,6 +1722,8 @@ golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
 golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
 golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
 golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1903,6 +1967,8 @@ honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw=
 honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
 honnef.co/go/tools v0.4.5 h1:YGD4H+SuIOOqsyoLOpZDWcieM28W47/zRO7f+9V3nvo=
 honnef.co/go/tools v0.4.5/go.mod h1:GUV+uIBCLpdf0/v6UhHHG/yzI/z6qPskBeQCjcNB96k=
+honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8=
+honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
 mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=
 mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM=
 mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
diff --git a/operator/.bingo/operator-sdk.mod b/operator/.bingo/operator-sdk.mod
index 774411872e4db..bb4d71328bec2 100644
--- a/operator/.bingo/operator-sdk.mod
+++ b/operator/.bingo/operator-sdk.mod
@@ -8,4 +8,4 @@ replace github.com/docker/distribution => github.com/docker/distribution v0.0.0-
 
 replace github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0
 
-require github.com/operator-framework/operator-sdk v1.31.0 // cmd/operator-sdk
+require github.com/operator-framework/operator-sdk v1.32.0 // cmd/operator-sdk
diff --git a/operator/.bingo/operator-sdk.sum b/operator/.bingo/operator-sdk.sum
index f805302708505..06921d8aa9c0b 100644
--- a/operator/.bingo/operator-sdk.sum
+++ b/operator/.bingo/operator-sdk.sum
@@ -1298,6 +1298,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
 github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
 github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/operator-framework/ansible-operator-plugins v1.32.0 h1:pBHNI9hoLYTMHsf6w6ozVYucJx+piMmfm86v8pbKFOY=
+github.com/operator-framework/ansible-operator-plugins v1.32.0/go.mod h1:G46APyI1JOzYW8BaSuPHxkTCfnEd0Hl0mwW8+bOn4IY=
 github.com/operator-framework/api v0.7.1/go.mod h1:L7IvLd/ckxJEJg/t4oTTlnHKAJIP/p51AvEslW3wYdY=
 github.com/operator-framework/api v0.10.0/go.mod h1:tV0BUNvly7szq28ZPBXhjp1Sqg5yHCOeX19ui9K4vjI=
 github.com/operator-framework/api v0.10.5 h1:/WvLKOPo8zZMyEmuW0kLC0PJBt4Xal8HZkFioKIxqTA=
@@ -1319,6 +1321,8 @@ github.com/operator-framework/api v0.17.4-0.20221221181915-f1b729684854 h1:/EFqP
 github.com/operator-framework/api v0.17.4-0.20221221181915-f1b729684854/go.mod h1:34tb98EwTN5SZLkgoxwvRkhMJKLHUWHOrrcv1ZwvEeA=
 github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42 h1:d/Pnr19TnmIq3zQ6ebewC+5jt5zqYbRkvYd37YZENQY=
 github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24=
+github.com/operator-framework/api v0.17.5 h1:9d0pc6m1Vp4QeS8i5dhl/B0nifhKQdtw+iFsNx0An0Q=
+github.com/operator-framework/api v0.17.5/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24=
 github.com/operator-framework/helm-operator-plugins v0.0.9 h1:G5aBY5sPrNXcRiKLpAaBMOYm7q0+qCmk9XWOAL/ZJuc=
 github.com/operator-framework/helm-operator-plugins v0.0.10 h1:27o8kDaLY9A3DKp2v6s+cAhebM0gXyfgYVc54x7Vtgc=
 github.com/operator-framework/helm-operator-plugins v0.0.12-0.20220613184440-7329cace347f h1:lS/IvqlvEQGIwXE0VlW+mOCmFEXBKywNbGQDrK++r/g=
@@ -1393,6 +1397,8 @@ github.com/operator-framework/operator-sdk v1.27.0 h1:Uhnhi88U2jWkagWB2G60qNbTv2
 github.com/operator-framework/operator-sdk v1.27.0/go.mod h1:DhJvT5akOZNilQu7OVUd8I+LLzaXv+S8VZktElQCtOs=
 github.com/operator-framework/operator-sdk v1.31.0 h1:jnTK3lQ8JkRE0sRV3AdTmNKBZmYZaCiEkPcm3LWGKxE=
 github.com/operator-framework/operator-sdk v1.31.0/go.mod h1:j51dzpQQTMlNxtn5ThSOfRZP7N2iUiGaAPj9uJN5JAo=
+github.com/operator-framework/operator-sdk v1.32.0 h1:2Wx7EVwrXhxEuwsm0RsgF3wEgeSGc7jwpnAmBG6f/y8=
+github.com/operator-framework/operator-sdk v1.32.0/go.mod h1:NnTOc0sOT8YnPF7Q7o8dSAhHBpobVHyJ8n8cQMOnmco=
 github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
@@ -1863,6 +1869,8 @@ golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4
 golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
 golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1910,6 +1918,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVD
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
 golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1998,6 +2008,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2044,6 +2056,8 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpi
 golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
 golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2188,6 +2202,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@@ -2202,6 +2218,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
 golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
 golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2219,6 +2237,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2332,6 +2352,8 @@ golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
 golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2899,6 +2921,8 @@ sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRM
 sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI=
 sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s=
 sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
+sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
+sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
 sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU=
 sigs.k8s.io/controller-tools v0.6.0 h1:o2Fm1K7CmIp8OVaBtXsWB/ssBAzyoKZPPAGR3VuxaKs=
 sigs.k8s.io/controller-tools v0.6.0/go.mod h1:baRMVPrctU77F+rfAuH2uPqW93k6yQnZA2dhUOr7ihc=
diff --git a/operator/.bingo/variables.env b/operator/.bingo/variables.env
index 72d37aeec6642..62b174cf69fc2 100644
--- a/operator/.bingo/variables.env
+++ b/operator/.bingo/variables.env
@@ -16,7 +16,7 @@ GEN_CRD_API_REFERENCE_DOCS="${GOBIN}/gen-crd-api-reference-docs-v0.0.3"
 
 GOFUMPT="${GOBIN}/gofumpt-v0.5.0"
 
-GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.54.2"
+GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.55.2"
 
 HUGO="${GOBIN}/hugo-v0.80.0"
 
@@ -30,7 +30,7 @@ KIND="${GOBIN}/kind-v0.20.0"
 
 KUSTOMIZE="${GOBIN}/kustomize-v4.5.7"
 
-OPERATOR_SDK="${GOBIN}/operator-sdk-v1.31.0"
+OPERATOR_SDK="${GOBIN}/operator-sdk-v1.32.0"
 
 PROMTOOL="${GOBIN}/promtool-v0.47.1"
 
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 8e5fffc23b543..0ad4b10eada0d 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
 ## Main
 
+- [11232](https://github.com/grafana/loki/pull/11232) **periklis**: Update dependencies and dev tools
 - [11129](https://github.com/grafana/loki/pull/11129) **periklis**: Update deps to secure webhooks for CVE-2023-44487
 
 ## 0.5.0 (2023-10-24)
diff --git a/operator/apis/config/v1/projectconfig_types.go b/operator/apis/config/v1/projectconfig_types.go
index bfbaee8ebd8b6..b6a80175266b6 100644
--- a/operator/apis/config/v1/projectconfig_types.go
+++ b/operator/apis/config/v1/projectconfig_types.go
@@ -2,7 +2,7 @@ package v1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	cfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
+	cfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" //nolint:staticcheck
 )
 
 // BuiltInCertManagement is the configuration for the built-in facility to generate and rotate
diff --git a/operator/controllers/loki/alertingrule_controller.go b/operator/controllers/loki/alertingrule_controller.go
index e6d1676c76fce..8840141d63e5e 100644
--- a/operator/controllers/loki/alertingrule_controller.go
+++ b/operator/controllers/loki/alertingrule_controller.go
@@ -10,7 +10,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
 	"github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
@@ -49,6 +48,6 @@ func (r *AlertingRuleReconciler) Reconcile(ctx context.Context, _ ctrl.Request)
 func (r *AlertingRuleReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&lokiv1.AlertingRule{}).
-		Watches(&source.Kind{Type: &corev1.Namespace{}}, &handler.EnqueueRequestForObject{}, builder.OnlyMetadata).
+		Watches(&corev1.Namespace{}, &handler.EnqueueRequestForObject{}, builder.OnlyMetadata).
 		Complete(r)
 }
diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go
index 3a0e70d4d0181..708390b62c846 100644
--- a/operator/controllers/loki/lokistack_controller.go
+++ b/operator/controllers/loki/lokistack_controller.go
@@ -25,7 +25,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	configv1 "github.com/grafana/loki/operator/apis/config/v1"
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -206,8 +205,8 @@ func (r *LokiStackReconciler) buildController(bld k8s.Builder) error {
 		Owns(&rbacv1.ClusterRoleBinding{}, updateOrDeleteOnlyPred).
 		Owns(&rbacv1.Role{}, updateOrDeleteOnlyPred).
 		Owns(&rbacv1.RoleBinding{}, updateOrDeleteOnlyPred).
-		Watches(&source.Kind{Type: &corev1.Service{}}, r.enqueueForAlertManagerServices(), createUpdateOrDeletePred).
-		Watches(&source.Kind{Type: &corev1.Secret{}}, r.enqueueForStorageSecret(), createUpdateOrDeletePred)
+		Watches(&corev1.Service{}, r.enqueueForAlertManagerServices(), createUpdateOrDeletePred).
+		Watches(&corev1.Secret{}, r.enqueueForStorageSecret(), createUpdateOrDeletePred)
 
 	if r.FeatureGates.LokiStackAlerts {
 		bld = bld.Owns(&monitoringv1.PrometheusRule{}, updateOrDeleteOnlyPred)
@@ -220,19 +219,18 @@ func (r *LokiStackReconciler) buildController(bld k8s.Builder) error {
 	}
 
 	if r.FeatureGates.OpenShift.ClusterTLSPolicy {
-		bld = bld.Watches(&source.Kind{Type: &openshiftconfigv1.APIServer{}}, r.enqueueAllLokiStacksHandler(), updateOrDeleteOnlyPred)
+		bld = bld.Watches(&openshiftconfigv1.APIServer{}, r.enqueueAllLokiStacksHandler(), updateOrDeleteOnlyPred)
 	}
 
 	if r.FeatureGates.OpenShift.ClusterProxy {
-		bld = bld.Watches(&source.Kind{Type: &openshiftconfigv1.Proxy{}}, r.enqueueAllLokiStacksHandler(), updateOrDeleteOnlyPred)
+		bld = bld.Watches(&openshiftconfigv1.Proxy{}, r.enqueueAllLokiStacksHandler(), updateOrDeleteOnlyPred)
 	}
 
 	return bld.Complete(r)
 }
 
 func (r *LokiStackReconciler) enqueueAllLokiStacksHandler() handler.EventHandler {
-	ctx := context.TODO()
-	return handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
+	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
 		lokiStacks := &lokiv1.LokiStackList{}
 		if err := r.Client.List(ctx, lokiStacks); err != nil {
 			r.Log.Error(err, "Error getting LokiStack resources in event handler")
@@ -268,8 +266,7 @@ func statusDifferent(e event.UpdateEvent) bool {
 }
 
 func (r *LokiStackReconciler) enqueueForAlertManagerServices() handler.EventHandler {
-	ctx := context.TODO()
-	return handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
+	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
 		lokiStacks := &lokiv1.LokiStackList{}
 		if err := r.Client.List(ctx, lokiStacks); err != nil {
 			r.Log.Error(err, "Error getting LokiStack resources in event handler")
@@ -301,8 +298,7 @@ func (r *LokiStackReconciler) enqueueForAlertManagerServices() handler.EventHand
 }
 
 func (r *LokiStackReconciler) enqueueForStorageSecret() handler.EventHandler {
-	ctx := context.TODO()
-	return handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
+	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
 		lokiStacks := &lokiv1.LokiStackList{}
 		if err := r.Client.List(ctx, lokiStacks); err != nil {
 			r.Log.Error(err, "Error getting LokiStack resources in event handler")
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/controllers/loki/lokistack_controller_test.go
index e90004c691a45..df85038a50cdc 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/controllers/loki/lokistack_controller_test.go
@@ -23,7 +23,6 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
 var (
@@ -197,12 +196,12 @@ func TestLokiStackController_RegisterWatchedResources(t *testing.T) {
 		index             int
 		watchesCallsCount int
 		featureGates      configv1.FeatureGates
-		src               source.Source
+		src               client.Object
 		pred              builder.OwnsOption
 	}
 	table := []test{
 		{
-			src:               &source.Kind{Type: &openshiftconfigv1.APIServer{}},
+			src:               &openshiftconfigv1.APIServer{},
 			index:             2,
 			watchesCallsCount: 3,
 			featureGates: configv1.FeatureGates{
@@ -213,7 +212,7 @@ func TestLokiStackController_RegisterWatchedResources(t *testing.T) {
 			pred: updateOrDeleteOnlyPred,
 		},
 		{
-			src:               &source.Kind{Type: &openshiftconfigv1.Proxy{}},
+			src:               &openshiftconfigv1.Proxy{},
 			index:             2,
 			watchesCallsCount: 3,
 			featureGates: configv1.FeatureGates{
@@ -224,14 +223,14 @@ func TestLokiStackController_RegisterWatchedResources(t *testing.T) {
 			pred: updateOrDeleteOnlyPred,
 		},
 		{
-			src:               &source.Kind{Type: &corev1.Service{}},
+			src:               &corev1.Service{},
 			index:             0,
 			watchesCallsCount: 2,
 			featureGates:      configv1.FeatureGates{},
 			pred:              createUpdateOrDeletePred,
 		},
 		{
-			src:               &source.Kind{Type: &corev1.Secret{}},
+			src:               &corev1.Secret{},
 			index:             1,
 			watchesCallsCount: 2,
 			featureGates:      configv1.FeatureGates{},
diff --git a/operator/controllers/loki/lokistack_zone_labeling_controller.go b/operator/controllers/loki/lokistack_zone_labeling_controller.go
index 5eea1fd20c43f..1012f6a2f5dc6 100644
--- a/operator/controllers/loki/lokistack_zone_labeling_controller.go
+++ b/operator/controllers/loki/lokistack_zone_labeling_controller.go
@@ -13,7 +13,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
-	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
 	"github.com/grafana/loki/operator/internal/external/k8s"
@@ -64,7 +63,7 @@ func (r *LokiStackZoneAwarePodReconciler) SetupWithManager(mgr ctrl.Manager) err
 func (r *LokiStackZoneAwarePodReconciler) buildController(bld k8s.Builder) error {
 	return bld.
 		Named("ZoneAwarePod").
-		Watches(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}, createOrUpdatePodWithLabelPred).
+		Watches(&corev1.Pod{}, &handler.EnqueueRequestForObject{}, createOrUpdatePodWithLabelPred).
 		Complete(r)
 }
 
diff --git a/operator/controllers/loki/lokistack_zone_labeling_controller_test.go b/operator/controllers/loki/lokistack_zone_labeling_controller_test.go
index f0802a99ff742..5fffbe894bf91 100644
--- a/operator/controllers/loki/lokistack_zone_labeling_controller_test.go
+++ b/operator/controllers/loki/lokistack_zone_labeling_controller_test.go
@@ -6,7 +6,7 @@ import (
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
-	"sigs.k8s.io/controller-runtime/pkg/source"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
 )
@@ -18,12 +18,12 @@ func TestLokiStackZoneAwarePodController_RegisterWatchedResources(t *testing.T)
 	type test struct {
 		index             int
 		watchesCallsCount int
-		src               source.Source
+		src               client.Object
 		pred              builder.OwnsOption
 	}
 	table := []test{
 		{
-			src:               &source.Kind{Type: &corev1.Pod{}},
+			src:               &corev1.Pod{},
 			index:             0,
 			watchesCallsCount: 1,
 			pred:              createOrUpdatePodWithLabelPred,
diff --git a/operator/controllers/loki/recordingrule_controller.go b/operator/controllers/loki/recordingrule_controller.go
index a07268cd1894e..adb6dbf80f194 100644
--- a/operator/controllers/loki/recordingrule_controller.go
+++ b/operator/controllers/loki/recordingrule_controller.go
@@ -10,7 +10,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
 	"github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
@@ -49,6 +48,6 @@ func (r *RecordingRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 func (r *RecordingRuleReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&lokiv1.RecordingRule{}).
-		Watches(&source.Kind{Type: &corev1.Namespace{}}, &handler.EnqueueRequestForObject{}, builder.OnlyMetadata).
+		Watches(&corev1.Namespace{}, &handler.EnqueueRequestForObject{}, builder.OnlyMetadata).
 		Complete(r)
 }
diff --git a/operator/docs/operator/feature-gates.md b/operator/docs/operator/feature-gates.md
index da0c851388ee2..7475dfa8a1cc0 100644
--- a/operator/docs/operator/feature-gates.md
+++ b/operator/docs/operator/feature-gates.md
@@ -519,7 +519,7 @@ K8S Controller-runtime v1alpha1.ControllerMetrics
 
 
 (Optional)
-

Metrics contains thw controller metrics configuration

+

Metrics contains the controller metrics configuration

diff --git a/operator/go.mod b/operator/go.mod index f42b1529aca4c..0fc901ddab734 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -4,27 +4,27 @@ go 1.20 require ( github.com/ViaQ/logerr/v2 v2.1.0 - github.com/go-logr/logr v1.2.3 + github.com/go-logr/logr v1.2.4 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.1 github.com/grafana/loki v1.6.2-0.20230403212622-90888a0cc737 github.com/grafana/loki/operator/apis/loki v0.0.0-00010101000000-000000000000 github.com/imdario/mergo v0.3.13 github.com/maxbrunsfeld/counterfeiter/v6 v6.7.0 - github.com/openshift/api v0.0.0-20230228142948-d170fcdc0fa6 // release-4.13 - github.com/openshift/library-go v0.0.0-20230302173334-c5e706838384 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0 + github.com/openshift/api v0.0.0-20231031181504-3be12e93388f // release-4.14 + github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.1 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.42.0 github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.10 - k8s.io/apimachinery v0.26.10 - k8s.io/apiserver v0.26.10 - k8s.io/client-go v0.26.10 - k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 - sigs.k8s.io/controller-runtime v0.14.7 + k8s.io/api v0.27.7 + k8s.io/apimachinery v0.27.7 + k8s.io/apiserver v0.27.7 + k8s.io/client-go v0.27.7 + k8s.io/utils v0.0.0-20230505201702-9f6742963106 + sigs.k8s.io/controller-runtime v0.15.3 sigs.k8s.io/yaml v1.3.0 ) @@ -53,8 +53,8 @@ require ( github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect 
github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -121,13 +121,13 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d // indirect github.com/weaveworks/promrus v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.5 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect - go.etcd.io/etcd/client/v3 v3.5.5 // indirect + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.opentelemetry.io/otel v1.11.2 // indirect go.opentelemetry.io/otel/trace v1.11.2 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/goleak v1.2.0 // indirect + go.uber.org/goleak v1.2.1 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.24.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect @@ -142,17 +142,17 @@ require ( golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.12.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 // indirect google.golang.org/grpc v1.52.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.26.10 // indirect - k8s.io/component-base v0.26.10 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect + k8s.io/apiextensions-apiserver v0.27.7 // indirect + k8s.io/component-base v0.27.7 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/operator/go.sum b/operator/go.sum index c2e4b33424d5f..982073b8b5eb6 100644 --- a/operator/go.sum +++ b/operator/go.sum @@ -55,21 +55,13 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.1 
h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -83,16 +75,9 @@ github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYr github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/ViaQ/logerr/v2 v2.1.0 h1:8WwzuNa1x+a6tRUl+6sFel83A/QxlFBUaFW2FyG2zzY= github.com/ViaQ/logerr/v2 v2.1.0/go.mod h1:/qoWLm3YG40Sv5u75s4fvzjZ5p36xINzaxU2L+DJ9uw= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -101,18 +86,14 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.27.0/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA= @@ -123,7 +104,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -148,48 +128,27 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker 
v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -204,8 +163,6 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= @@ -217,13 +174,9 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -239,65 +192,22 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod 
h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod 
h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer 
v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -316,7 +226,6 @@ github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -331,7 +240,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -378,7 +286,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -399,8 +306,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= @@ -413,16 +320,11 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 h1:IOks+FXJ6iO/pfbaVEf4efNw+YzYBYNCkCabyrbkFTM= github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2/go.mod h1:zj+5BNZAVmQafV583uLTAOzRr963KPdEm4d6NPmtbwg= @@ -432,13 +334,9 @@ github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 h1:VXitROTlm github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765/go.mod h1:DhJMrd2QInI/1CNtTN43BZuTmkccdizW1jZ+F6aHkhY= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= @@ -485,7 +383,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru 
v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= @@ -494,16 +391,13 @@ github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7 h1:XOdd3JHyeQn github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ionos-cloud/sdk-go/v6 v6.1.3 
h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -511,14 +405,11 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -542,18 +433,10 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -570,7 +453,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -589,7 +471,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -603,31 +484,22 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/openshift/api v0.0.0-20230228142948-d170fcdc0fa6 h1:SKsW89hdqceWIg1BT+vhLia8CltOV/zpY63hO67ckbw= -github.com/openshift/api v0.0.0-20230228142948-d170fcdc0fa6/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= -github.com/openshift/library-go v0.0.0-20230302173334-c5e706838384 
h1:lNyOAREdff+F246oiR7OUGMRQSYbS0OXYbNPMTJMO/4= -github.com/openshift/library-go v0.0.0-20230302173334-c5e706838384/go.mod h1:xO4nAf0qa56dgvEJWVD1WuwSJ8JWPU1TYLBQrlutWnE= +github.com/openshift/api v0.0.0-20231031181504-3be12e93388f h1:j7QYl057TBD/lxWAGCr4IrbdG7IMg2U60DFK+BKt3VI= +github.com/openshift/api v0.0.0-20231031181504-3be12e93388f/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs= +github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123 h1:JfXG50f8yVud5xakwTHoqD00+3HYdLmZuEqn5Sq8ZRQ= +github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123/go.mod h1:ZFwNwC3opc/7aOvzUbU95zp33Lbxet48h80ryH3p6DY= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -641,9 +513,6 @@ github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -653,15 +522,13 @@ github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0 h1:klFBev4UPGvhr3GF2b73Q1omlzZVONAhLwDhcQX0+4E= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.1 h1:u1Mw9irznvsBPxQxjUmCel1ufP3UgzA1CILj7/2tpNw= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.1/go.mod h1:KZHvrby65G+rA4V/vMTUXDV22TI+GgLIrCigYClpjzk= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= @@ -696,11 +563,9 @@ github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwa 
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI= github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ= @@ -709,7 +574,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -717,26 +581,14 @@ 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -748,10 +600,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -759,33 +610,23 @@ github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go/codec 
v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d h1:9Z/HiqeGN+LOnmotAMpFEQjuXZ4AGAVFG0rC1laP5Go= github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d/go.mod h1:Fnq3+U51tMkPRMC6Wr7zKGUeFFYX4YjNrNK50iU0fcE= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= -go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= -go.etcd.io/etcd/client/pkg/v3 
v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= -go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -798,36 +639,27 @@ go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9 go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 
h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -863,7 +695,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -879,17 +710,12 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -898,11 +724,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -983,33 +806,24 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1043,7 +857,6 @@ golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1083,7 +896,6 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1096,7 +908,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1105,10 +916,8 @@ golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1118,14 +927,11 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1177,8 +983,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= +gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1341,7 +1147,6 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= @@ -1373,17 +1178,11 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1399,7 +1198,6 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1407,52 +1205,33 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/api v0.26.10 h1:skTnrDR0r8dg4MMLf6YZIzugxNM0BjFsWKPkNc5kOvk= -k8s.io/api v0.26.10/go.mod h1:ou/H3yviqrHtP/DSPVTfsc7qNfmU06OhajytJfYXkXw= -k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= -k8s.io/apiextensions-apiserver v0.26.10 h1:wAriTUc6l7gUqJKOxhmXnYo/VNJzk4oh4QLCUR4Uq+k= -k8s.io/apiextensions-apiserver v0.26.10/go.mod h1:N2qhlxkhJLSoC4f0M1/1lNG627b45SYqnOPEVFoQXw4= -k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.26.10 h1:aE+J2KIbjctFqPp3Y0q4Wh2PD+l1p2g3Zp4UYjSvtGU= -k8s.io/apimachinery v0.26.10/go.mod h1:iT1ZP4JBP34wwM+ZQ8ByPEQ81u043iqAcsJYftX9amM= -k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= -k8s.io/apiserver v0.26.10 h1:gradpIHygzZN87yK+o6V3gpbCSF78HZ0hejLZQQwdDs= -k8s.io/apiserver v0.26.10/go.mod h1:TGrQKQWUfQcotK3P4TtoVZxXOWklFF36QZlA5wufLs4= -k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= -k8s.io/client-go v0.26.10 h1:4mDzl+1IrfRxh4Ro0s65JRGJp14w77gSMUTjACYWVRo= -k8s.io/client-go v0.26.10/go.mod h1:sh74ig838gCckU4ElYclWb24lTesPdEDPnlyg5vcbkA= -k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= -k8s.io/component-base v0.26.10 h1:vl3Gfe5aC09mNxfnQtTng7u3rnBVrShOK3MAkqEleb0= -k8s.io/component-base v0.26.10/go.mod 
h1:/IDdENUHG5uGxqcofZajovYXE9KSPzJ4yQbkYQt7oN0= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= -k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.27.7 h1:7yG4D3t/q4utJe2ptlRw9aPuxcSmroTsYxsofkQNl/A= +k8s.io/api v0.27.7/go.mod h1:ZNExI/Lhrs9YrLgVWx6jjHZdoWCTXfBXuFjt1X6olro= +k8s.io/apiextensions-apiserver v0.27.7 h1:YqIOwZAUokzxJIjunmUd4zS1v3JhK34EPXn+pP0/bsU= +k8s.io/apiextensions-apiserver v0.27.7/go.mod h1:x0p+b5a955lfPz9gaDeBy43obM12s+N9dNHK6+dUL+g= +k8s.io/apimachinery v0.27.7 h1:Gxgtb7Y/Rsu8ymgmUEaiErkxa6RY4oTd8kNUI6SUR58= +k8s.io/apimachinery v0.27.7/go.mod h1:jBGQgTjkw99ef6q5hv1YurDd3BqKDk9YRxmX0Ozo0i8= +k8s.io/apiserver v0.27.7 h1:E8sDHwfUug82YC1++qvE73QxihaXDqT4tr8XYBOEtc4= +k8s.io/apiserver v0.27.7/go.mod h1:OrLG9RwCOerutAlo8QJW5EHzUG9Dad7k6rgcDUNSO/w= +k8s.io/client-go v0.27.7 
h1:+Xgh9OOKv6A3qdD4Dnl/0VOI5EvAv+0s/OseDxVVTwQ= +k8s.io/client-go v0.27.7/go.mod h1:dZ2kqcalYp5YZ2EV12XIMc77G6PxHWOJp/kclZr4+5Q= +k8s.io/component-base v0.27.7 h1:kngM58HR9W9Nqpv7e4rpdRyWnKl/ABpUhLAZ+HoliMs= +k8s.io/component-base v0.27.7/go.mod h1:YGjlCVL1oeKvG3HSciyPHFh+LCjIEqsxz4BDR3cfHRs= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= +k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/controller-runtime v0.14.7 h1:Vrnm2vk9ZFlRkXATHz0W0wXcqNl7kPat8q2JyxVy0Q8= -sigs.k8s.io/controller-runtime v0.14.7/go.mod h1:ErTs3SJCOujNUnTz4AS+uh8hp6DHMo1gj6fFndJT1X8= +sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= +sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= 
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/operator/internal/external/k8s/builder.go b/operator/internal/external/k8s/builder.go index 99cd51c6842b4..a54bde54d7c48 100644 --- a/operator/internal/external/k8s/builder.go +++ b/operator/internal/external/k8s/builder.go @@ -8,7 +8,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" ) // Builder is a controller-runtime interface used internally. 
It copies function from @@ -18,7 +17,7 @@ import ( type Builder interface { For(object client.Object, opts ...builder.ForOption) Builder Owns(object client.Object, opts ...builder.OwnsOption) Builder - Watches(src source.Source, handler handler.EventHandler, opts ...builder.WatchesOption) Builder + Watches(object client.Object, handler handler.EventHandler, opts ...builder.WatchesOption) Builder WithEventFilter(p predicate.Predicate) Builder WithOptions(options controller.Options) Builder WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) Builder @@ -45,8 +44,8 @@ func (b *ctrlBuilder) Owns(object client.Object, opts ...builder.OwnsOption) Bui return &ctrlBuilder{bld: b.bld.Owns(object, opts...)} } -func (b *ctrlBuilder) Watches(src source.Source, handler handler.EventHandler, opts ...builder.WatchesOption) Builder { - return &ctrlBuilder{bld: b.bld.Watches(src, handler, opts...)} +func (b *ctrlBuilder) Watches(object client.Object, handler handler.EventHandler, opts ...builder.WatchesOption) Builder { + return &ctrlBuilder{bld: b.bld.Watches(object, handler, opts...)} } func (b *ctrlBuilder) WithEventFilter(p predicate.Predicate) Builder { diff --git a/operator/internal/external/k8s/client.go b/operator/internal/external/k8s/client.go index 9dc128021f42d..b227bf8499253 100644 --- a/operator/internal/external/k8s/client.go +++ b/operator/internal/external/k8s/client.go @@ -5,6 +5,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -27,6 +28,8 @@ type Client interface { RESTMapper() meta.RESTMapper Scheme() *runtime.Scheme + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + IsObjectNamespaced(obj runtime.Object) (bool, error) Status() client.StatusWriter SubResource(subResource string) client.SubResourceClient } diff --git a/operator/internal/external/k8s/k8sfakes/fake_builder.go 
b/operator/internal/external/k8s/k8sfakes/fake_builder.go index e67459af53fcc..9470b477be069 100644 --- a/operator/internal/external/k8s/k8sfakes/fake_builder.go +++ b/operator/internal/external/k8s/k8sfakes/fake_builder.go @@ -12,7 +12,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" ) type FakeBuilder struct { @@ -75,10 +74,10 @@ type FakeBuilder struct { ownsReturnsOnCall map[int]struct { result1 k8s.Builder } - WatchesStub func(source.Source, handler.EventHandler, ...builder.WatchesOption) k8s.Builder + WatchesStub func(client.Object, handler.EventHandler, ...builder.WatchesOption) k8s.Builder watchesMutex sync.RWMutex watchesArgsForCall []struct { - arg1 source.Source + arg1 client.Object arg2 handler.EventHandler arg3 []builder.WatchesOption } @@ -435,11 +434,11 @@ func (fake *FakeBuilder) OwnsReturnsOnCall(i int, result1 k8s.Builder) { }{result1} } -func (fake *FakeBuilder) Watches(arg1 source.Source, arg2 handler.EventHandler, arg3 ...builder.WatchesOption) k8s.Builder { +func (fake *FakeBuilder) Watches(arg1 client.Object, arg2 handler.EventHandler, arg3 ...builder.WatchesOption) k8s.Builder { fake.watchesMutex.Lock() ret, specificReturn := fake.watchesReturnsOnCall[len(fake.watchesArgsForCall)] fake.watchesArgsForCall = append(fake.watchesArgsForCall, struct { - arg1 source.Source + arg1 client.Object arg2 handler.EventHandler arg3 []builder.WatchesOption }{arg1, arg2, arg3}) @@ -462,13 +461,13 @@ func (fake *FakeBuilder) WatchesCallCount() int { return len(fake.watchesArgsForCall) } -func (fake *FakeBuilder) WatchesCalls(stub func(source.Source, handler.EventHandler, ...builder.WatchesOption) k8s.Builder) { +func (fake *FakeBuilder) WatchesCalls(stub func(client.Object, handler.EventHandler, ...builder.WatchesOption) k8s.Builder) { fake.watchesMutex.Lock() defer fake.watchesMutex.Unlock() fake.WatchesStub = 
stub } -func (fake *FakeBuilder) WatchesArgsForCall(i int) (source.Source, handler.EventHandler, []builder.WatchesOption) { +func (fake *FakeBuilder) WatchesArgsForCall(i int) (client.Object, handler.EventHandler, []builder.WatchesOption) { fake.watchesMutex.RLock() defer fake.watchesMutex.RUnlock() argsForCall := fake.watchesArgsForCall[i] diff --git a/operator/internal/external/k8s/k8sfakes/fake_client.go b/operator/internal/external/k8s/k8sfakes/fake_client.go index 3a92d9463fec7..31ab3ee45791b 100644 --- a/operator/internal/external/k8s/k8sfakes/fake_client.go +++ b/operator/internal/external/k8s/k8sfakes/fake_client.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/loki/operator/internal/external/k8s" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -66,6 +67,32 @@ type FakeClient struct { getReturnsOnCall map[int]struct { result1 error } + GroupVersionKindForStub func(runtime.Object) (schema.GroupVersionKind, error) + groupVersionKindForMutex sync.RWMutex + groupVersionKindForArgsForCall []struct { + arg1 runtime.Object + } + groupVersionKindForReturns struct { + result1 schema.GroupVersionKind + result2 error + } + groupVersionKindForReturnsOnCall map[int]struct { + result1 schema.GroupVersionKind + result2 error + } + IsObjectNamespacedStub func(runtime.Object) (bool, error) + isObjectNamespacedMutex sync.RWMutex + isObjectNamespacedArgsForCall []struct { + arg1 runtime.Object + } + isObjectNamespacedReturns struct { + result1 bool + result2 error + } + isObjectNamespacedReturnsOnCall map[int]struct { + result1 bool + result2 error + } ListStub func(context.Context, client.ObjectList, ...client.ListOption) error listMutex sync.RWMutex listArgsForCall []struct { @@ -404,6 +431,134 @@ func (fake *FakeClient) GetReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeClient) GroupVersionKindFor(arg1 
runtime.Object) (schema.GroupVersionKind, error) { + fake.groupVersionKindForMutex.Lock() + ret, specificReturn := fake.groupVersionKindForReturnsOnCall[len(fake.groupVersionKindForArgsForCall)] + fake.groupVersionKindForArgsForCall = append(fake.groupVersionKindForArgsForCall, struct { + arg1 runtime.Object + }{arg1}) + stub := fake.GroupVersionKindForStub + fakeReturns := fake.groupVersionKindForReturns + fake.recordInvocation("GroupVersionKindFor", []interface{}{arg1}) + fake.groupVersionKindForMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) GroupVersionKindForCallCount() int { + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + return len(fake.groupVersionKindForArgsForCall) +} + +func (fake *FakeClient) GroupVersionKindForCalls(stub func(runtime.Object) (schema.GroupVersionKind, error)) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = stub +} + +func (fake *FakeClient) GroupVersionKindForArgsForCall(i int) runtime.Object { + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + argsForCall := fake.groupVersionKindForArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) GroupVersionKindForReturns(result1 schema.GroupVersionKind, result2 error) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = nil + fake.groupVersionKindForReturns = struct { + result1 schema.GroupVersionKind + result2 error + }{result1, result2} +} + +func (fake *FakeClient) GroupVersionKindForReturnsOnCall(i int, result1 schema.GroupVersionKind, result2 error) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = nil + if 
fake.groupVersionKindForReturnsOnCall == nil { + fake.groupVersionKindForReturnsOnCall = make(map[int]struct { + result1 schema.GroupVersionKind + result2 error + }) + } + fake.groupVersionKindForReturnsOnCall[i] = struct { + result1 schema.GroupVersionKind + result2 error + }{result1, result2} +} + +func (fake *FakeClient) IsObjectNamespaced(arg1 runtime.Object) (bool, error) { + fake.isObjectNamespacedMutex.Lock() + ret, specificReturn := fake.isObjectNamespacedReturnsOnCall[len(fake.isObjectNamespacedArgsForCall)] + fake.isObjectNamespacedArgsForCall = append(fake.isObjectNamespacedArgsForCall, struct { + arg1 runtime.Object + }{arg1}) + stub := fake.IsObjectNamespacedStub + fakeReturns := fake.isObjectNamespacedReturns + fake.recordInvocation("IsObjectNamespaced", []interface{}{arg1}) + fake.isObjectNamespacedMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) IsObjectNamespacedCallCount() int { + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + return len(fake.isObjectNamespacedArgsForCall) +} + +func (fake *FakeClient) IsObjectNamespacedCalls(stub func(runtime.Object) (bool, error)) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = stub +} + +func (fake *FakeClient) IsObjectNamespacedArgsForCall(i int) runtime.Object { + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + argsForCall := fake.isObjectNamespacedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) IsObjectNamespacedReturns(result1 bool, result2 error) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = nil + fake.isObjectNamespacedReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeClient) 
IsObjectNamespacedReturnsOnCall(i int, result1 bool, result2 error) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = nil + if fake.isObjectNamespacedReturnsOnCall == nil { + fake.isObjectNamespacedReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.isObjectNamespacedReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + func (fake *FakeClient) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { fake.listMutex.Lock() ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] @@ -825,6 +980,10 @@ func (fake *FakeClient) Invocations() map[string][][]interface{} { defer fake.deleteAllOfMutex.RUnlock() fake.getMutex.RLock() defer fake.getMutex.RUnlock() + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() fake.listMutex.RLock() defer fake.listMutex.RUnlock() fake.patchMutex.RLock() diff --git a/operator/internal/handlers/internal/storage/ca_configmap_test.go b/operator/internal/handlers/internal/storage/ca_configmap_test.go index b42c74d08aca0..1e164f5a25413 100644 --- a/operator/internal/handlers/internal/storage/ca_configmap_test.go +++ b/operator/internal/handlers/internal/storage/ca_configmap_test.go @@ -3,9 +3,10 @@ package storage_test import ( "testing" - "github.com/grafana/loki/operator/internal/handlers/internal/storage" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + + "github.com/grafana/loki/operator/internal/handlers/internal/storage" ) func TestIsValidConfigMap(t *testing.T) { diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go index a97c2150544de..1212e5c2f7e92 100644 --- a/operator/internal/status/lokistack.go +++ b/operator/internal/status/lokistack.go @@ -74,7 +74,7 @@ func 
SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, m return updateCondition(ctx, k, req, degraded) } -func generateCondition(ctx context.Context, cs *lokiv1.LokiStackComponentStatus, k client.Client, req ctrl.Request, stack *lokiv1.LokiStack) (metav1.Condition, error) { +func generateCondition(ctx context.Context, cs *lokiv1.LokiStackComponentStatus, k k8s.Client, req ctrl.Request, stack *lokiv1.LokiStack) (metav1.Condition, error) { // Check for failed pods first failed := len(cs.Compactor[corev1.PodFailed]) + len(cs.Distributor[corev1.PodFailed]) + diff --git a/operator/internal/validation/alertingrule.go b/operator/internal/validation/alertingrule.go index 46d4710f61607..d72f0b1ae8d95 100644 --- a/operator/internal/validation/alertingrule.go +++ b/operator/internal/validation/alertingrule.go @@ -33,25 +33,25 @@ func (v *AlertingRuleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error } // ValidateCreate implements admission.CustomValidator. -func (v *AlertingRuleValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error { +func (v *AlertingRuleValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, obj) } // ValidateUpdate implements admission.CustomValidator. -func (v *AlertingRuleValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) error { +func (v *AlertingRuleValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, newObj) } // ValidateDelete implements admission.CustomValidator. 
-func (v *AlertingRuleValidator) ValidateDelete(_ context.Context, _ runtime.Object) error { +func (v *AlertingRuleValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { // No validation on delete - return nil + return nil, nil } -func (v *AlertingRuleValidator) validate(ctx context.Context, obj runtime.Object) error { +func (v *AlertingRuleValidator) validate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { alertingRule, ok := obj.(*lokiv1.AlertingRule) if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("object is not of type AlertingRule: %t", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("object is not of type AlertingRule: %t", obj)) } var allErrs field.ErrorList @@ -122,10 +122,10 @@ func (v *AlertingRuleValidator) validate(ctx context.Context, obj runtime.Object } if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "loki.grafana.com", Kind: "AlertingRule"}, alertingRule.Name, allErrs, diff --git a/operator/internal/validation/alertingrule_test.go b/operator/internal/validation/alertingrule_test.go index 4e1526460b9a2..7ff5bdb1cca7c 100644 --- a/operator/internal/validation/alertingrule_test.go +++ b/operator/internal/validation/alertingrule_test.go @@ -232,7 +232,7 @@ func TestAlertingRuleValidationWebhook_ValidateCreate(t *testing.T) { ctx := context.Background() v := &validation.AlertingRuleValidator{} - err := v.ValidateCreate(ctx, l) + _, err := v.ValidateCreate(ctx, l) if err != nil { require.Equal(t, tc.err, err) } else { @@ -257,7 +257,7 @@ func TestAlertingRuleValidationWebhook_ValidateUpdate(t *testing.T) { ctx := context.Background() v := &validation.AlertingRuleValidator{} - err := v.ValidateUpdate(ctx, &lokiv1.AlertingRule{}, l) + _, err := v.ValidateUpdate(ctx, &lokiv1.AlertingRule{}, l) if err != nil { require.Equal(t, tc.err, err) } else { diff --git 
a/operator/internal/validation/lokistack.go b/operator/internal/validation/lokistack.go index 8f78ae482bb20..3246b7ada106f 100644 --- a/operator/internal/validation/lokistack.go +++ b/operator/internal/validation/lokistack.go @@ -35,25 +35,25 @@ func (v *LokiStackValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { } // ValidateCreate implements admission.CustomValidator. -func (v *LokiStackValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error { +func (v *LokiStackValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, obj) } // ValidateUpdate implements admission.CustomValidator. -func (v *LokiStackValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) error { +func (v *LokiStackValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, newObj) } // ValidateDelete implements admission.CustomValidator. -func (v *LokiStackValidator) ValidateDelete(_ context.Context, _ runtime.Object) error { +func (v *LokiStackValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { // No validation on delete - return nil + return nil, nil } -func (v *LokiStackValidator) validate(ctx context.Context, obj runtime.Object) error { +func (v *LokiStackValidator) validate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { stack, ok := obj.(*lokiv1.LokiStack) if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("object is not of type LokiStack: %t", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("object is not of type LokiStack: %t", obj)) } var allErrs field.ErrorList @@ -83,10 +83,10 @@ func (v *LokiStackValidator) validate(ctx context.Context, obj runtime.Object) e } if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "loki.grafana.com", Kind: 
"LokiStack"}, stack.Name, allErrs, diff --git a/operator/internal/validation/lokistack_test.go b/operator/internal/validation/lokistack_test.go index a8aaab1f41f94..238f884980e08 100644 --- a/operator/internal/validation/lokistack_test.go +++ b/operator/internal/validation/lokistack_test.go @@ -407,7 +407,7 @@ func TestLokiStackValidationWebhook_ValidateCreate(t *testing.T) { ctx := context.Background() v := &validation.LokiStackValidator{} - err := v.ValidateCreate(ctx, l) + _, err := v.ValidateCreate(ctx, l) if err != nil { require.Equal(t, tc.err, err) } else { @@ -431,7 +431,7 @@ func TestLokiStackValidationWebhook_ValidateUpdate(t *testing.T) { ctx := context.Background() v := &validation.LokiStackValidator{} - err := v.ValidateUpdate(ctx, &lokiv1.LokiStack{}, l) + _, err := v.ValidateUpdate(ctx, &lokiv1.LokiStack{}, l) if err != nil { require.Equal(t, tc.err, err) } else { diff --git a/operator/internal/validation/recordingrule.go b/operator/internal/validation/recordingrule.go index 97395408e8ba7..a84011efd2de8 100644 --- a/operator/internal/validation/recordingrule.go +++ b/operator/internal/validation/recordingrule.go @@ -33,25 +33,25 @@ func (v *RecordingRuleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error } // ValidateCreate implements admission.CustomValidator. -func (v *RecordingRuleValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error { +func (v *RecordingRuleValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, obj) } // ValidateUpdate implements admission.CustomValidator. -func (v *RecordingRuleValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) error { +func (v *RecordingRuleValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, newObj) } // ValidateDelete implements admission.CustomValidator. 
-func (v *RecordingRuleValidator) ValidateDelete(_ context.Context, _ runtime.Object) error { +func (v *RecordingRuleValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { // No validation on delete - return nil + return nil, nil } -func (v *RecordingRuleValidator) validate(ctx context.Context, obj runtime.Object) error { +func (v *RecordingRuleValidator) validate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { recordingRule, ok := obj.(*lokiv1.RecordingRule) if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("object is not of type RecordingRule: %t", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("object is not of type RecordingRule: %t", obj)) } var allErrs field.ErrorList @@ -120,10 +120,10 @@ func (v *RecordingRuleValidator) validate(ctx context.Context, obj runtime.Objec } if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "loki.grafana.com", Kind: "RecordingRule"}, recordingRule.Name, allErrs, diff --git a/operator/internal/validation/recordingrule_test.go b/operator/internal/validation/recordingrule_test.go index 9fd37004d8695..dcbbc2d7bf4da 100644 --- a/operator/internal/validation/recordingrule_test.go +++ b/operator/internal/validation/recordingrule_test.go @@ -201,7 +201,7 @@ func TestRecordingRuleValidationWebhook_ValidateCreate(t *testing.T) { } v := &validation.RecordingRuleValidator{} - err := v.ValidateCreate(ctx, l) + _, err := v.ValidateCreate(ctx, l) if err != nil { require.Equal(t, tc.err, err) } else { @@ -226,7 +226,7 @@ func TestRecordingRuleValidationWebhook_ValidateUpdate(t *testing.T) { } v := &validation.RecordingRuleValidator{} - err := v.ValidateUpdate(ctx, &lokiv1.RecordingRule{}, l) + _, err := v.ValidateUpdate(ctx, &lokiv1.RecordingRule{}, l) if err != nil { require.Equal(t, tc.err, err) } else { diff --git a/operator/internal/validation/rulerconfig.go 
b/operator/internal/validation/rulerconfig.go index f0b10ea95ffc3..9e4ab0b0b8297 100644 --- a/operator/internal/validation/rulerconfig.go +++ b/operator/internal/validation/rulerconfig.go @@ -29,25 +29,25 @@ func (v *RulerConfigValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { } // ValidateCreate implements admission.CustomValidator. -func (v *RulerConfigValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error { +func (v *RulerConfigValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, obj) } // ValidateUpdate implements admission.CustomValidator. -func (v *RulerConfigValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) error { +func (v *RulerConfigValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { return v.validate(ctx, newObj) } // ValidateDelete implements admission.CustomValidator. -func (v *RulerConfigValidator) ValidateDelete(_ context.Context, _ runtime.Object) error { +func (v *RulerConfigValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { // No validation on delete - return nil + return nil, nil } -func (v *RulerConfigValidator) validate(ctx context.Context, obj runtime.Object) error { +func (v *RulerConfigValidator) validate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { rulerConfig, ok := obj.(*lokiv1.RulerConfig) if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("object is not of type RulerConfig: %t", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("object is not of type RulerConfig: %t", obj)) } var allErrs field.ErrorList @@ -93,10 +93,10 @@ func (v *RulerConfigValidator) validate(ctx context.Context, obj runtime.Object) } if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: "loki.grafana.com", Kind: "RulerConfig"}, 
rulerConfig.Name, allErrs, diff --git a/operator/internal/validation/rulerconfig_test.go b/operator/internal/validation/rulerconfig_test.go index b20375e560f12..374e6b5206b0c 100644 --- a/operator/internal/validation/rulerconfig_test.go +++ b/operator/internal/validation/rulerconfig_test.go @@ -182,7 +182,7 @@ func TestRulerConfigValidationWebhook_ValidateCreate(t *testing.T) { } v := &validation.RulerConfigValidator{} - err := v.ValidateCreate(ctx, l) + _, err := v.ValidateCreate(ctx, l) if err != nil { require.Equal(t, tc.err, err) } else { @@ -207,7 +207,7 @@ func TestRulerConfigValidationWebhook_ValidateUpdate(t *testing.T) { } v := &validation.RulerConfigValidator{} - err := v.ValidateUpdate(ctx, &lokiv1.RulerConfig{}, l) + _, err := v.ValidateUpdate(ctx, &lokiv1.RulerConfig{}, l) if err != nil { require.Equal(t, tc.err, err) } else { diff --git a/operator/main.go b/operator/main.go index 7ebfb136887be..6b101175407e7 100644 --- a/operator/main.go +++ b/operator/main.go @@ -62,7 +62,7 @@ func main() { ctrlCfg := ctrlconfigv1.ProjectConfig{} options := ctrl.Options{Scheme: scheme} if configFile != "" { - options, err = options.AndFrom(ctrl.ConfigFile().AtPath(configFile).OfKind(&ctrlCfg)) + options, err = options.AndFrom(ctrl.ConfigFile().AtPath(configFile).OfKind(&ctrlCfg)) //nolint:staticcheck if err != nil { logger.Error(err, "failed to parse controller manager config file") os.Exit(1) From 5395c36314b08291d32d9b4823c6c3a894a77275 Mon Sep 17 00:00:00 2001 From: Chris Knutson Date: Fri, 17 Nov 2023 10:09:45 -0600 Subject: [PATCH 11/48] ksonnet: generate tsdb_shipper storage_config even if using_boltdb_shipper is false (#11195) **What this PR does / why we need it**: When configuring tsdb shipper without using bolt db shipper, the ksonnet library fails to generate the tsdb_shipper storage config. 
--- CHANGELOG.md | 1 + production/ksonnet/loki/shipper.libsonnet | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b01d4e3dc1815..65c97f3d8dee5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ ##### Fixes * [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var. +* [11195](https://github.com/grafana/loki/pull/11195) **canuteson** Generate tsdb_shipper storage_config even if using_boltdb_shipper is false ##### Changes diff --git a/production/ksonnet/loki/shipper.libsonnet b/production/ksonnet/loki/shipper.libsonnet index b0140bd8f4826..374a797eba0c9 100644 --- a/production/ksonnet/loki/shipper.libsonnet +++ b/production/ksonnet/loki/shipper.libsonnet @@ -24,7 +24,7 @@ active_index_directory: '/data/index', cache_location: '/data/boltdb-cache', }, - } + if $._config.using_tsdb_shipper then { + } else {} + if $._config.using_tsdb_shipper then { tsdb_shipper+: { active_index_directory: '/data/tsdb-index', cache_location: '/data/tsdb-cache', From 15d98fa7cae6cbe6de97c487edafc96df5ad34d7 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 17 Nov 2023 13:04:48 -0800 Subject: [PATCH 12/48] iterable tokenizer w/ comparison benching (#11255) Rebuilds tokenizers based on an iterable approach. This ends up being significantly faster -- benchmarks below. This PR doesn't remove the old tokenizers, just adds the new ones. Revamping tokenizer consumers & removing the old variants can be done in a separate PR. Roughly, raw tokenizers complete in 66% of the time and chunkID prefixed tokenizers in ~55-60% of the time. 
``` pkg: github.com/grafana/loki/pkg/storage/bloom/v1 BenchmarkTokens/three/v1-10 134811 8648 ns/op 0 B/op 0 allocs/op BenchmarkTokens/three/v2-10 179797 5728 ns/op 0 B/op 0 allocs/op BenchmarkTokens/threeSkip1/v1-10 215822 5260 ns/op 0 B/op 0 allocs/op BenchmarkTokens/threeSkip1/v2-10 343784 3514 ns/op 0 B/op 0 allocs/op BenchmarkTokens/threeChunk/v1-10 82394 14559 ns/op 0 B/op 0 allocs/op BenchmarkTokens/threeChunk/v2-10 146973 8138 ns/op 128 B/op 1 allocs/op BenchmarkTokens/threeSkip1Chunk/v1-10 152475 7878 ns/op 0 B/op 0 allocs/op BenchmarkTokens/threeSkip1Chunk/v2-10 251373 4776 ns/op 128 B/op 1 allocs/op PASS ok github.com/grafana/loki/pkg/storage/bloom/v1 10.206s ``` --- pkg/storage/bloom/v1/tokenizer.go | 101 ++++++++++++- pkg/storage/bloom/v1/tokenizer_test.go | 189 +++++++++++++++++++++++-- 2 files changed, 277 insertions(+), 13 deletions(-) diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go index 96e51f2cd0488..e27fa04e312f5 100644 --- a/pkg/storage/bloom/v1/tokenizer.go +++ b/pkg/storage/bloom/v1/tokenizer.go @@ -81,7 +81,7 @@ func (t *NgramTokenizer) Tokens(line string) []Token { t.buffers[j][pos] = r if i >= n-1 && (i+1-n)%(t.skip+1) == 0 { - t.runeBuffer = reassemble(t.buffers[j], (i+1)%n, t.runeBuffer) + t.runeBuffer = reassemble(t.buffers[j], len(t.buffers[j]), (i+1)%n, t.runeBuffer) if numToks >= cap(t.internalTokenBuffer) || numToks == len(t.internalTokenBuffer) { t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)}) } @@ -95,9 +95,9 @@ func (t *NgramTokenizer) Tokens(line string) []Token { return t.internalTokenBuffer[0:numToks] } -func reassemble(buf []rune, pos int, result []byte) []byte { +func reassemble(buf []rune, ln, pos int, result []byte) []byte { result = result[:0] // Reset the result slice - for i := 0; i < len(buf); i++ { + for i := 0; i < ln; i++ { cur := (pos + i) % len(buf) result = utf8.AppendRune(result, buf[cur]) } @@ -168,3 +168,98 @@ func (w 
*WrappedTokenizer) Reinit(chk logproto.ChunkRef) { binary.LittleEndian.PutUint32(w.i32buf, chk.Checksum) w.prefix = append(w.prefix, w.i32buf...) } + +// Iterable variants (more performant, less space) + +type NGramTokenizerV2 struct { + n, skip int + buffer []rune // circular buffer used for ngram generation + res []byte // buffer used for token generation +} + +/* +N-Grams (https://en.wikipedia.org/wiki/N-gram) are a series of 'n' adjacent characters in a string. +These will be utilized for the bloom filters to allow for fuzzy searching. +*/ +func NewNGramTokenizerV2(n, skip int) *NGramTokenizerV2 { + t := &NGramTokenizerV2{ + n: n, + skip: skip, + buffer: make([]rune, n+skip), + res: make([]byte, 0, n*4), // maximum 4 bytes per rune + } + + return t +} + +// The Token iterator uses shared buffers for performance. The []byte returned by At() +// is not safe for use after subsequent calls to Next() +func (t *NGramTokenizerV2) Tokens(line string) NGramTokenIter { + return NGramTokenIter{ + n: t.n, + skip: t.skip, + + line: line, + + buffer: t.buffer, + res: t.res, + } +} + +type NGramTokenIter struct { + n, skip int + + runeIndex, offset int + line string // source + + buffer []rune // circular buffers used for ngram generation + res []byte +} + +func (t *NGramTokenIter) Next() bool { + for i, r := range t.line[t.offset:] { + t.buffer[t.runeIndex%len(t.buffer)] = r + t.runeIndex++ + + if t.runeIndex < t.n { + continue + } + + // if the start of the ngram is at the interval of our skip factor, emit it. 
+ // we increment the skip due to modulo logic: + // because `n % 0 is a divide by zero and n % 1 is always 0` + if (t.runeIndex-t.n)%(t.skip+1) == 0 { + t.offset += (i + utf8.RuneLen(r)) + return true + } + + } + return false +} + +func (t *NGramTokenIter) At() []byte { + return reassemble(t.buffer, t.n, (t.runeIndex-t.n)%len(t.buffer), t.res[:0]) +} + +func (t *NGramTokenIter) Err() error { + return nil +} + +type PrefixedTokenIter struct { + prefix []byte + prefixLen int + + NGramTokenIter +} + +func (t *PrefixedTokenIter) At() []byte { + return append(t.prefix[:t.prefixLen], t.NGramTokenIter.At()...) +} + +func NewPrefixedTokenIter(prefix []byte, iter NGramTokenIter) *PrefixedTokenIter { + return &PrefixedTokenIter{ + prefix: prefix, + prefixLen: len(prefix), + NGramTokenIter: iter, + } +} diff --git a/pkg/storage/bloom/v1/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go index 8a2c32d7930d8..a0becd464646a 100644 --- a/pkg/storage/bloom/v1/tokenizer_test.go +++ b/pkg/storage/bloom/v1/tokenizer_test.go @@ -25,6 +25,46 @@ var ( six = NewNGramTokenizer(6, 7, 0) ) +func TestNGramIterator(t *testing.T) { + var ( + three = NewNGramTokenizerV2(3, 0) + threeSkip1 = NewNGramTokenizerV2(3, 1) + threeSkip3 = NewNGramTokenizerV2(3, 3) + ) + + for _, tc := range []struct { + desc string + t *NGramTokenizerV2 + input string + exp []string + }{ + { + t: three, + input: "abcdefg", + exp: []string{"abc", "bcd", "cde", "def", "efg"}, + }, + { + t: threeSkip1, + input: "abcdefg", + exp: []string{"abc", "cde", "efg"}, + }, + { + t: threeSkip3, + input: "abcdefgh", + exp: []string{"abc", "efg"}, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + itr := tc.t.Tokens(tc.input) + for _, exp := range tc.exp { + require.True(t, itr.Next()) + require.Equal(t, exp, string(itr.At())) + } + require.False(t, itr.Next()) + }) + } +} + func TestNGrams(t *testing.T) { tokenizer := NewNGramTokenizer(2, 4, 0) for _, tc := range []struct { @@ -541,18 +581,147 @@ func TestWrappedTokenizer(t 
*testing.T) { } } +const lorem = ` +lorum ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna +aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat +duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur +sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est +laborum ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna +aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat +duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur +sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est +` + func BenchmarkTokens(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) + var ( + v2Three = NewNGramTokenizerV2(3, 0) + v2ThreeSkip1 = NewNGramTokenizerV2(3, 1) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - _ = three.Tokens(line) - } + // fp + from + through + checksum + chunkPrefixLen = 8 + 8 + 8 + 4 + ) + + type impl struct { + desc string + f func() + } + type tc struct { + desc string + impls []impl + } + for _, tc := range []tc{ + { + desc: "three", + impls: []impl{ + { + desc: "v1", + f: func() { + for _, tok := range three.Tokens(lorem) { + _ = tok + } + }, + }, + { + desc: "v2", + f: func() { + itr := v2Three.Tokens(lorem) + for itr.Next() { + _ = itr.At() + } + }, + }, + }, + }, + { + desc: "threeSkip1", + impls: []impl{ + { + desc: "v1", + f: func() { + for _, tok := range threeSkip1.Tokens(lorem) { + _ = tok + } + }, + }, + { + desc: "v2", + f: func() { + itr := v2ThreeSkip1.Tokens(lorem) + for itr.Next() { + _ = itr.At() + } + }, + }, + }, + }, 
+ { + desc: "threeChunk", + impls: []impl{ + { + desc: "v1", + f: func() func() { + chunkTokenizer := ChunkIDTokenizer(three) + chunkTokenizer.Reinit(logproto.ChunkRef{}) + return func() { + for _, tok := range chunkTokenizer.Tokens(lorem) { + _ = tok + } + } + }(), + }, + { + desc: "v2", + f: func() func() { + prefix := make([]byte, chunkPrefixLen, 512) + return func() { + itr := NewPrefixedTokenIter(prefix, v2Three.Tokens(lorem)) + for itr.Next() { + _ = itr.At() + } + } + }(), + }, + }, + }, + { + desc: "threeSkip1Chunk", + impls: []impl{ + { + desc: "v1", + f: func() func() { + chunkTokenizer := ChunkIDTokenizer(threeSkip1) + chunkTokenizer.Reinit(logproto.ChunkRef{}) + return func() { + for _, tok := range chunkTokenizer.Tokens(lorem) { + _ = tok + } + } + }(), + }, + { + desc: "v2", + f: func() func() { + prefix := make([]byte, chunkPrefixLen, 512) + return func() { + itr := NewPrefixedTokenIter(prefix, v2ThreeSkip1.Tokens(lorem)) + for itr.Next() { + _ = itr.At() + } + } + }(), + }, + }, + }, + } { + b.Run(tc.desc, func(b *testing.B) { + for _, impl := range tc.impls { + b.Run(impl.desc, func(b *testing.B) { + for i := 0; i < b.N; i++ { + impl.f() + } + }) + } + }) } } From 6db9d778a14e1cfb6b615a571ae05059a5b979df Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 17 Nov 2023 13:13:53 -0800 Subject: [PATCH 13/48] inverts the logic when testing a block against a list of chunks (#11248) returns the list of chunks in the bloom which failed the test so we can merge these results across blocks --- pkg/storage/bloom/v1/fuse.go | 40 +++++++++++++++++-------------- pkg/storage/bloom/v1/fuse_test.go | 4 ++-- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go index 150c656aca04d..a75a4fe5df99a 100644 --- a/pkg/storage/bloom/v1/fuse.go +++ b/pkg/storage/bloom/v1/fuse.go @@ -12,11 +12,12 @@ type request struct { response chan output } -// output represents a chunk that failed to pass all searches 
-// and must be downloaded +// output represents a chunk that was present in the bloom +// but failed to pass the search filters and can be removed from +// the list of chunks to download type output struct { - fp model.Fingerprint - chks ChunkRefs + fp model.Fingerprint + removals ChunkRefs } // Fuse combines multiple requests into a single loop iteration @@ -77,8 +78,8 @@ func (fq *FusedQuerier) Run() error { // fingerprint not found, can't remove chunks for _, input := range nextBatch { input.response <- output{ - fp: fp, - chks: input.chks, + fp: fp, + removals: nil, } } } @@ -89,8 +90,8 @@ func (fq *FusedQuerier) Run() error { // fingerprint not found, can't remove chunks for _, input := range nextBatch { input.response <- output{ - fp: fp, - chks: input.chks, + fp: fp, + removals: nil, } } continue @@ -100,23 +101,27 @@ func (fq *FusedQuerier) Run() error { // test every input against this chunk inputLoop: for _, input := range nextBatch { - mustCheck, inBlooms := input.chks.Compare(series.Chunks, true) + _, inBlooms := input.chks.Compare(series.Chunks, true) // First, see if the search passes the series level bloom before checking for chunks individually for _, search := range input.searches { if !bloom.Test(search) { // the entire series bloom didn't pass one of the searches, // so we can skip checking chunks individually. 
- // We still return all chunks that are not included in the bloom - // as they may still have the data + // We return all the chunks that were the intersection of the query + // because they for sure do not match the search and don't + // need to be downloaded input.response <- output{ - fp: fp, - chks: mustCheck, + fp: fp, + removals: inBlooms, } continue inputLoop } } + // TODO(owen-d): pool + var removals ChunkRefs + chunkLoop: for _, chk := range inBlooms { for _, search := range input.searches { @@ -124,17 +129,16 @@ func (fq *FusedQuerier) Run() error { var combined = search if !bloom.ScalableBloomFilter.Test(combined) { + removals = append(removals, chk) continue chunkLoop } } - // chunk passed all searches, add to the list of chunks to download - mustCheck = append(mustCheck, chk) - + // Otherwise, the chunk passed all the searches } input.response <- output{ - fp: fp, - chks: mustCheck, + fp: fp, + removals: removals, } } diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go index e73f654c5295f..7114475597f8a 100644 --- a/pkg/storage/bloom/v1/fuse_test.go +++ b/pkg/storage/bloom/v1/fuse_test.go @@ -94,8 +94,8 @@ func TestFusedQuerier(t *testing.T) { require.Equal( t, output{ - fp: req.fp, - chks: req.chks, + fp: req.fp, + removals: nil, }, resp, ) From 58eaad9947e65f0373316a37532bfbc1b39ac838 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 17 Nov 2023 14:07:00 -0800 Subject: [PATCH 14/48] boundscheck & partitioning fingerprints between blocks (#11237) Adds a few utilities for comparing fingerprints to blocks that cover a specific fingerprint range. Will likely need to be refactored with more comprehensive types, but the logic still applies. 
--- pkg/storage/bloom/v1/fuse.go | 42 +++++++++++++++++++++++++++ pkg/storage/bloom/v1/fuse_test.go | 33 ++++++++++++++++++++++ pkg/storage/bloom/v1/util.go | 47 +++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+) diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go index a75a4fe5df99a..021fba1e81856 100644 --- a/pkg/storage/bloom/v1/fuse.go +++ b/pkg/storage/bloom/v1/fuse.go @@ -1,6 +1,8 @@ package v1 import ( + "sort" + "github.com/efficientgo/core/errors" "github.com/prometheus/common/model" ) @@ -146,3 +148,43 @@ func (fq *FusedQuerier) Run() error { return nil } + +// boundedRequests is a set of requests that are clamped to a specific range +type boundedRequests struct { + bounds FingerprintBounds + reqs [][]model.Fingerprint +} + +// reqs models a set of requests covering many fingerprints. +// consumers models a set of blocks covering different fingerprint ranges +func partitionFingerprintRange(reqs [][]model.Fingerprint, blocks []FingerprintBounds) (res []boundedRequests) { + for _, block := range blocks { + bounded := boundedRequests{ + bounds: block, + } + + for _, req := range reqs { + min := sort.Search(len(req), func(i int) bool { + return block.Cmp(req[i]) > Before + }) + + max := sort.Search(len(req), func(i int) bool { + return block.Cmp(req[i]) == After + }) + + // All fingerprints fall outside of the consumer's range + if min == len(req) || max == 0 { + continue + } + + bounded.reqs = append(bounded.reqs, req[min:max]) + } + + if len(bounded.reqs) > 0 { + res = append(res, bounded) + } + + } + + return res +} diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go index 7114475597f8a..b990d69f4b7bd 100644 --- a/pkg/storage/bloom/v1/fuse_test.go +++ b/pkg/storage/bloom/v1/fuse_test.go @@ -7,11 +7,44 @@ import ( "testing" "github.com/grafana/dskit/concurrency" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/chunkenc" ) +func 
TestPartitionFingerprintRange(t *testing.T) { + seriesPerBound := 100 + bounds := []FingerprintBounds{ + {0, 99}, + {100, 199}, + {200, 299}, + {300, 399}, // one out of bounds block + } + + nReqs := 4 + nSeries := 300 + reqs := make([][]model.Fingerprint, nReqs) + for i := 0; i < nSeries; i++ { + reqs[i%4] = append(reqs[i%nReqs], model.Fingerprint(i)) + } + + results := partitionFingerprintRange(reqs, bounds) + require.Equal(t, 3, len(results)) // ensure we only return bounds in range + for _, res := range results { + // ensure we have the right number of requests per bound + for i := 0; i < nReqs; i++ { + require.Equal(t, seriesPerBound/nReqs, len(res.reqs[i])) + } + } + + // ensure bound membership + for i := 0; i < nSeries; i++ { + require.Equal(t, model.Fingerprint(i), results[i/seriesPerBound].reqs[i%nReqs][i%seriesPerBound/nReqs]) + } + +} + func TestFusedQuerier(t *testing.T) { // references for linking in memory reader+writer indexBuf := bytes.NewBuffer(nil) diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go index e764cba5c6197..15de62e9f9590 100644 --- a/pkg/storage/bloom/v1/util.go +++ b/pkg/storage/bloom/v1/util.go @@ -7,6 +7,7 @@ import ( "io" "sync" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/util/pool" ) @@ -241,3 +242,49 @@ func PointerSlice[T any](xs []T) []*T { } return out } + +type BoundsCheck uint8 + +const ( + Before BoundsCheck = iota + Overlap + After +) + +type FingerprintBounds struct { + Min, Max model.Fingerprint +} + +// Cmp returns the fingerprint's position relative to the bounds +func (b FingerprintBounds) Cmp(fp model.Fingerprint) BoundsCheck { + if fp < b.Min { + return Before + } else if fp > b.Max { + return After + } + return Overlap +} + +// unused, but illustrative +type BoundedIter[V any] struct { + Iterator[V] + cmp func(V) BoundsCheck +} + +func (bi *BoundedIter[V]) Next() bool { + for bi.Iterator.Next() { + switch bi.cmp(bi.Iterator.At()) { + case Before: + continue + case 
After: + return false + default: + return true + } + } + return false +} + +func NewBoundedIter[V any](itr Iterator[V], cmp func(V) BoundsCheck) *BoundedIter[V] { + return &BoundedIter[V]{Iterator: itr, cmp: cmp} +} From c716e498ad83a9dc5f5c5430c555b19e86d2586f Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Mon, 20 Nov 2023 11:48:12 +0530 Subject: [PATCH 15/48] compactor: do not block compation when retention is taking too long (#9884) **What this PR does / why we need it**: Currently, we perform compaction and apply retention in the same loop. Although we have a flag for not applying retention every time we perform compaction, we still see compaction getting blocked when processing some intensive delete requests(processed while applying retention). This PR separates out the compaction and retention to run in a separate loop. I have added a table-locking feature to avoid compaction and retention from processing the same tables at a time. However, compaction and retention would treat locked tables differently, as explained below: * When compaction sees a table is locked: It would skip the table and move on to the following table. However, before skipping, it would check if the table has any uncompacted files and increment the newly added counter called `loki_compactor_skipped_compacting_locked_tables_total` to track how often we are skipping compacting tables which have uncompacted files. * When retention sees a table is locked: It would wait for the lock to be released since we can't skip any tables while processing delete requests. **Special notes for your reviewer**: * The check for tables with uncompacted files looks for count > 1 because initially, we did not support per tenant index in `boltdb-shipper`, so a table can have a single compacted multi-tenant index file. In a rare case where we have a single file which was supposed to be compacted away, it is okay to have a single uncompacted file for a while. 
The aim here is to not block compaction for too long in a large cell with too many uncompacted files. * Retention only works on the compacted index, so we first compact down any uncompacted files while applying retention. **Checklist** - [x] Tests updated --------- Co-authored-by: Ashwanth --- pkg/compactor/compactor.go | 104 ++++++++++++++++------ pkg/compactor/compactor_test.go | 150 +++++++++++++++++++++++++++++++- pkg/compactor/metrics.go | 26 ++++-- pkg/compactor/table.go | 8 ++ pkg/compactor/table_locker.go | 52 +++++++++++ 5 files changed, 301 insertions(+), 39 deletions(-) create mode 100644 pkg/compactor/table_locker.go diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index a45248af6bbd3..64f89fc696b9f 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -124,8 +124,8 @@ func (cfg *Config) Validate() error { return fmt.Errorf("compactor.delete-request-store should be configured when retention is enabled") } - if cfg.ApplyRetentionInterval != 0 && cfg.ApplyRetentionInterval%cfg.CompactionInterval != 0 { - return fmt.Errorf("interval for applying retention should either be set to a 0 or a multiple of compaction interval") + if cfg.ApplyRetentionInterval == 0 { + cfg.ApplyRetentionInterval = cfg.CompactionInterval } if err := config.ValidatePathPrefix(cfg.DeleteRequestStoreKeyPrefix); err != nil { @@ -153,6 +153,7 @@ type Compactor struct { wg sync.WaitGroup indexCompactors map[string]IndexCompactor schemaConfig config.SchemaConfig + tableLocker *tableLocker // Ring used for running a single compactor ringLifecycler *ring.BasicLifecycler @@ -193,6 +194,7 @@ func NewCompactor(cfg Config, objectStoreClients map[config.DayTime]client.Objec ringPollPeriod: 5 * time.Second, indexCompactors: map[string]IndexCompactor{}, schemaConfig: schemaConfig, + tableLocker: newTableLocker(), } ringStore, err := kv.NewClient( @@ -503,41 +505,52 @@ func (c *Compactor) runCompactions(ctx context.Context) { } }() - lastRetentionRunAt := 
time.Unix(0, 0) - runCompaction := func() { - applyRetention := false - if c.cfg.RetentionEnabled && time.Since(lastRetentionRunAt) >= c.cfg.ApplyRetentionInterval { - level.Info(util_log.Logger).Log("msg", "applying retention with compaction") - applyRetention = true - } + // do the initial compaction + if err := c.RunCompaction(ctx, false); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to run compaction", err) + } - err := c.RunCompaction(ctx, applyRetention) - if err != nil { - level.Error(util_log.Logger).Log("msg", "failed to run compaction", "err", err) - } + c.wg.Add(1) + go func() { + defer c.wg.Done() - if applyRetention { - lastRetentionRunAt = time.Now() + ticker := time.NewTicker(c.cfg.CompactionInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := c.RunCompaction(ctx, false); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to run compaction", err) + } + case <-ctx.Done(): + return + } } - } + }() c.wg.Add(1) go func() { defer c.wg.Done() - runCompaction() + if err := c.RunCompaction(ctx, true); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to apply retention", err) + } - ticker := time.NewTicker(c.cfg.CompactionInterval) + ticker := time.NewTicker(c.cfg.ApplyRetentionInterval) defer ticker.Stop() for { select { case <-ticker.C: - runCompaction() + if err := c.RunCompaction(ctx, true); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to apply retention", err) + } case <-ctx.Done(): return } } }() + if c.cfg.RetentionEnabled { for _, container := range c.storeContainers { c.wg.Add(1) @@ -576,6 +589,37 @@ func (c *Compactor) CompactTable(ctx context.Context, tableName string, applyRet return fmt.Errorf("index store client not found for period starting at %s", schemaCfg.From.String()) } + for { + locked, lockWaiterChan := c.tableLocker.lockTable(tableName) + if locked { + break + } + // do not wait for lock to be released if we are only compacting the table since + 
// compaction should happen more frequently than retention and retention anyway compacts un-compacted files as well. + if !applyRetention { + hasUncompactedIndex, err := tableHasUncompactedIndex(ctx, tableName, sc.indexStorageClient) + if err != nil { + level.Error(util_log.Logger).Log("msg", "failed to check if table has uncompacted index", "table_name", tableName) + hasUncompactedIndex = true + } + + if hasUncompactedIndex { + c.metrics.skippedCompactingLockedTables.Inc() + level.Warn(util_log.Logger).Log("msg", "skipped compacting table which likely has uncompacted index since it is locked by retention", "table_name", tableName) + } + return nil + } + + // we are applying retention and processing delete requests so, + // wait for lock to be released since we can't mark delete requests as processed without checking all the tables + select { + case <-lockWaiterChan: + case <-ctx.Done(): + return nil + } + } + defer c.tableLocker.unlockTable(tableName) + table, err := newTable(ctx, filepath.Join(c.cfg.WorkingDirectory, tableName), sc.indexStorageClient, indexCompactor, schemaCfg, sc.tableMarker, c.expirationChecker, c.cfg.UploadParallelism) if err != nil { @@ -601,7 +645,7 @@ func (c *Compactor) RegisterIndexCompactor(indexType string, indexCompactor Inde c.indexCompactors[indexType] = indexCompactor } -func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) error { +func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) (err error) { status := statusSuccess start := time.Now() @@ -610,11 +654,15 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro } defer func() { - c.metrics.compactTablesOperationTotal.WithLabelValues(status).Inc() + if err != nil { + status = statusFailure + } + withRetentionLabelValue := fmt.Sprintf("%v", applyRetention) + c.metrics.compactTablesOperationTotal.WithLabelValues(status, withRetentionLabelValue).Inc() runtime := time.Since(start) if status == statusSuccess { - 
c.metrics.compactTablesOperationDurationSeconds.Set(runtime.Seconds()) - c.metrics.compactTablesOperationLastSuccess.SetToCurrentTime() + c.metrics.compactTablesOperationDurationSeconds.WithLabelValues(withRetentionLabelValue).Set(runtime.Seconds()) + c.metrics.compactTablesOperationLastSuccess.WithLabelValues(withRetentionLabelValue).SetToCurrentTime() if applyRetention { c.metrics.applyRetentionLastSuccess.SetToCurrentTime() } @@ -627,7 +675,7 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro c.expirationChecker.MarkPhaseFailed() } } - if runtime > c.cfg.CompactionInterval { + if !applyRetention && runtime > c.cfg.CompactionInterval { level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("last compaction took %s which is longer than the compaction interval of %s, this can lead to duplicate compactors running if not running a standalone compactor instance.", runtime, c.cfg.CompactionInterval)) } }() @@ -644,7 +692,6 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro sc.indexStorageClient.RefreshIndexTableNamesCache(ctx) tbls, err := sc.indexStorageClient.ListTables(ctx) if err != nil { - status = statusFailure return fmt.Errorf("failed to list tables: %w", err) } @@ -721,12 +768,15 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) erro for i := 0; i < c.cfg.MaxCompactionParallelism; i++ { err := <-errChan if err != nil && firstErr == nil { - status = statusFailure firstErr = err } } - return firstErr + if firstErr != nil { + return firstErr + } + + return ctx.Err() } type expirationChecker struct { diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 854339ca6ecaf..6913956aaa70e 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -10,6 +10,8 @@ import ( "time" "github.com/grafana/dskit/flagext" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" 
"github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -18,6 +20,7 @@ import ( "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/util/constants" loki_net "github.com/grafana/loki/pkg/util/net" + "github.com/grafana/loki/pkg/validation" ) const indexTablePrefix = "table_" @@ -41,7 +44,8 @@ func setupTestCompactor(t *testing.T, objectClients map[config.DayTime]client.Ob cfg := Config{} flagext.DefaultValues(&cfg) cfg.WorkingDirectory = filepath.Join(tempDir, workingDirName) - cfg.RetentionEnabled = false + cfg.RetentionEnabled = true + cfg.DeleteRequestStore = periodConfigs[len(periodConfigs)-1].ObjectType cfg.CompactorRing.InstanceAddr = localhost if loopbackIFace, err := loki_net.LoopbackInterfaceName(); err == nil { @@ -50,9 +54,16 @@ func setupTestCompactor(t *testing.T, objectClients map[config.DayTime]client.Ob require.NoError(t, cfg.Validate()) - c, err := NewCompactor(cfg, objectClients, nil, config.SchemaConfig{ + defaultLimits := validation.Limits{} + flagext.DefaultValues(&defaultLimits) + require.NoError(t, defaultLimits.RetentionPeriod.Set("30d")) + + overrides, err := validation.NewOverrides(defaultLimits, nil) + require.NoError(t, err) + + c, err := NewCompactor(cfg, objectClients, objectClients[periodConfigs[len(periodConfigs)-1].From], config.SchemaConfig{ Configs: periodConfigs, - }, nil, nil, constants.Loki) + }, overrides, prometheus.NewPedanticRegistry(), constants.Loki) require.NoError(t, err) c.RegisterIndexCompactor("dummy", testIndexCompactor{}) @@ -292,3 +303,136 @@ func Test_tableSort(t *testing.T) { sortTablesByRange(intervals) require.Equal(t, []string{"index_19195", "index_19192", "index_19191"}, intervals) } + +func TestCompactor_TableLocking(t *testing.T) { + commonDBsConfig := IndexesConfig{NumUnCompactedFiles: 5} + perUserDBsConfig := PerUserIndexesConfig{} + + daySeconds := int64(24 * time.Hour / time.Second) + tableNumEnd := time.Now().Unix() / daySeconds + tableNumStart := tableNumEnd 
- 5 + + setupCompactorAndIndex := func(tempDir string) *Compactor { + tablesPath := filepath.Join(tempDir, "index") + + periodConfigs := []config.PeriodConfig{ + { + From: config.DayTime{Time: model.Time(0)}, + IndexType: "dummy", + ObjectType: "fs_01", + IndexTables: config.IndexPeriodicTableConfig{ + PathPrefix: "index/", + PeriodicTableConfig: config.PeriodicTableConfig{ + Prefix: indexTablePrefix, + Period: config.ObjectStorageIndexRequiredPeriod, + }}, + }, + } + + for i := tableNumStart; i <= tableNumEnd; i++ { + SetupTable(t, filepath.Join(tablesPath, fmt.Sprintf("%s%d", indexTablePrefix, i)), IndexesConfig{NumUnCompactedFiles: 5}, PerUserIndexesConfig{}) + } + + var ( + objectClients = map[config.DayTime]client.ObjectClient{} + err error + ) + objectClients[periodConfigs[0].From], err = local.NewFSObjectClient(local.FSConfig{Directory: tempDir}) + require.NoError(t, err) + + return setupTestCompactor(t, objectClients, periodConfigs, tempDir) + } + + for _, tc := range []struct { + name string + lockTable string + applyRetention bool + + compactionShouldTimeout bool + }{ + { + name: "no table locked - not applying retention", + }, + { + name: "no table locked - applying retention", + applyRetention: true, + }, + { + name: "first table locked - not applying retention", + lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd), + }, + { + name: "first table locked - applying retention", + lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd), + applyRetention: true, + compactionShouldTimeout: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + tempDir := t.TempDir() + tablesPath := filepath.Join(tempDir, "index") + compactor := setupCompactorAndIndex(tempDir) + + // run the compaction twice, 2nd time without any table locking + for n := 1; n <= 2; n++ { + t.Run(fmt.Sprintf("%d", n), func(t *testing.T) { + // lock table only for the first run + if n == 1 && tc.lockTable != "" { + locked, _ := compactor.tableLocker.lockTable(tc.lockTable) + 
require.True(t, locked) + + defer compactor.tableLocker.unlockTable(tc.lockTable) + } + + // set a timeout so that retention does not get blocked forever on acquiring table lock. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + err := compactor.RunCompaction(ctx, tc.applyRetention) + // compaction should not timeout after first run since we won't be locking the table + if n == 1 && tc.compactionShouldTimeout { + require.ErrorIs(t, err, context.DeadlineExceeded) + require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusFailure, "true"))) + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusFailure, "false"))) + return + } + require.NoError(t, err) + + if n > 1 && tc.compactionShouldTimeout { + // this should be the first successful run if compaction was expected to be timeout out during first run + require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess, fmt.Sprintf("%v", tc.applyRetention)))) + } else { + // else it should have succeeded during all the n runs + require.Equal(t, float64(n), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess, fmt.Sprintf("%v", tc.applyRetention)))) + } + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess, fmt.Sprintf("%v", !tc.applyRetention)))) + + // if the table was locked and compaction ran without retention then only locked table should have been skipped + if tc.lockTable != "" { + if tc.applyRetention { + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables)) + } else { + require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables)) + } + } + + for tableNum := tableNumStart; tableNum <= tableNumEnd; 
tableNum++ { + name := fmt.Sprintf("%s%d", indexTablePrefix, tableNum) + files, err := os.ReadDir(filepath.Join(tablesPath, name)) + require.NoError(t, err) + + if n == 1 && name == tc.lockTable { + // locked table should not be compacted during first run + require.Len(t, files, 5) + } else { + require.Len(t, files, 1) + require.True(t, strings.HasSuffix(files[0].Name(), ".gz")) + + verifyCompactedIndexTable(t, commonDBsConfig, perUserDBsConfig, filepath.Join(tablesPath, name)) + } + } + }) + } + }) + } +} diff --git a/pkg/compactor/metrics.go b/pkg/compactor/metrics.go index b81ae2ab51da4..7cbf404c81848 100644 --- a/pkg/compactor/metrics.go +++ b/pkg/compactor/metrics.go @@ -8,14 +8,17 @@ import ( const ( statusFailure = "failure" statusSuccess = "success" + + lblWithRetention = "with_retention" ) type metrics struct { compactTablesOperationTotal *prometheus.CounterVec - compactTablesOperationDurationSeconds prometheus.Gauge - compactTablesOperationLastSuccess prometheus.Gauge + compactTablesOperationDurationSeconds *prometheus.GaugeVec + compactTablesOperationLastSuccess *prometheus.GaugeVec applyRetentionLastSuccess prometheus.Gauge compactorRunning prometheus.Gauge + skippedCompactingLockedTables prometheus.Counter } func newMetrics(r prometheus.Registerer) *metrics { @@ -23,18 +26,18 @@ func newMetrics(r prometheus.Registerer) *metrics { compactTablesOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ Namespace: "loki_boltdb_shipper", Name: "compact_tables_operation_total", - Help: "Total number of tables compaction done by status", - }, []string{"status"}), - compactTablesOperationDurationSeconds: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Help: "Total number of tables compaction done by status and with/without retention", + }, []string{"status", lblWithRetention}), + compactTablesOperationDurationSeconds: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ Namespace: "loki_boltdb_shipper", Name: 
"compact_tables_operation_duration_seconds", - Help: "Time (in seconds) spent in compacting all the tables", - }), - compactTablesOperationLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Help: "Time (in seconds) spent in compacting all the tables with/without retention", + }, []string{lblWithRetention}), + compactTablesOperationLastSuccess: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ Namespace: "loki_boltdb_shipper", Name: "compact_tables_operation_last_successful_run_timestamp_seconds", Help: "Unix timestamp of the last successful compaction run", - }), + }, []string{lblWithRetention}), applyRetentionLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: "loki_boltdb_shipper", Name: "apply_retention_last_successful_run_timestamp_seconds", @@ -45,6 +48,11 @@ func newMetrics(r prometheus.Registerer) *metrics { Name: "compactor_running", Help: "Value will be 1 if compactor is currently running on this instance", }), + skippedCompactingLockedTables: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: "loki_compactor", + Name: "skipped_compacting_locked_tables_total", + Help: "Count of uncompacted tables being skipped due to them being locked by retention", + }), } return &m diff --git a/pkg/compactor/table.go b/pkg/compactor/table.go index 92059a7c15e29..b7b94627c7415 100644 --- a/pkg/compactor/table.go +++ b/pkg/compactor/table.go @@ -265,3 +265,11 @@ func (t *table) openCompactedIndexForRetention(idxSet *indexSet) error { return nil } + +// tableHasUncompactedIndex returns true if we have more than "1" common index files. +// We are checking for more than "1" because earlier boltdb-shipper index type did not have per tenant index so there would be only common index files. +// In case of per tenant index, it is okay to consider it compacted since having just 1 uncompacted index file for a while should be fine. 
+func tableHasUncompactedIndex(ctx context.Context, tableName string, indexStorageClient storage.Client) (bool, error) { + commonIndexFiles, _, err := indexStorageClient.ListFiles(ctx, tableName, false) + return len(commonIndexFiles) > 1, err +} diff --git a/pkg/compactor/table_locker.go b/pkg/compactor/table_locker.go new file mode 100644 index 0000000000000..bce818a5d2b62 --- /dev/null +++ b/pkg/compactor/table_locker.go @@ -0,0 +1,52 @@ +package compactor + +import "sync" + +type lockWaiterChan chan struct{} + +type tableLocker struct { + lockedTables map[string]lockWaiterChan + lockedTablesMtx sync.RWMutex +} + +func newTableLocker() *tableLocker { + return &tableLocker{ + lockedTables: map[string]lockWaiterChan{}, + } +} + +// lockTable attempts to lock a table. It returns true if the lock gets acquired for the caller. +// It also returns a channel which the caller can watch to detect unlocking of table if it was already locked by some other caller. +func (t *tableLocker) lockTable(tableName string) (bool, <-chan struct{}) { + locked := false + + t.lockedTablesMtx.RLock() + c, ok := t.lockedTables[tableName] + t.lockedTablesMtx.RUnlock() + if ok { + return false, c + } + + t.lockedTablesMtx.Lock() + defer t.lockedTablesMtx.Unlock() + + c, ok = t.lockedTables[tableName] + if !ok { + t.lockedTables[tableName] = make(chan struct{}) + c = t.lockedTables[tableName] + locked = true + } + + return locked, c +} + +func (t *tableLocker) unlockTable(tableName string) { + t.lockedTablesMtx.Lock() + defer t.lockedTablesMtx.Unlock() + + c, ok := t.lockedTables[tableName] + if ok { + close(c) + } + delete(t.lockedTables, tableName) +} From 30d0030bf9f5ba0bdbf5bad27711456537ae7ca0 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Mon, 20 Nov 2023 11:00:13 +0100 Subject: [PATCH 16/48] inflight-logging: Add extra metadata to inflight requests logging (#11243) **What this PR does / why we need it**: logging: Add extra metadata to inflight requests This adds extra metadata 
(similar to what we have in `metrics.go`) but for queries in in-flight (both started and retrying) Changes: Adds following data 1. Query Hash 2. Start and End time 3. Start and End delta 4. Length of the query 5. Moved the helper util to `queryutil` package because of cyclic dependencies with `logql` package. **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: Find the screenshots of log entries looks like (both in `retry.go` and `roundtrip.go`) ![Screenshot 2023-11-16 at 13 01 32](https://github.com/grafana/loki/assets/3735252/177e97ed-6ee8-41dd-b088-2e4f49562ba0) ![Screenshot 2023-11-16 at 13 02 15](https://github.com/grafana/loki/assets/3735252/fb328a37-dbe3-483e-b083-f21327858029) **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [x] Tests updated - [x] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. 
[Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --------- Signed-off-by: Kaviraj --- CHANGELOG.md | 1 + Makefile | 4 +++- pkg/logql/blocker.go | 3 ++- pkg/logql/blocker_test.go | 5 +++-- pkg/logql/engine.go | 2 +- pkg/logql/engine_test.go | 2 +- pkg/logql/metrics.go | 18 ++++++----------- pkg/logql/metrics_test.go | 7 ++++--- .../queryrange/queryrangebase/retry.go | 20 ++++++++++++++++++- pkg/querier/queryrange/roundtrip.go | 18 ++++++++++++++--- pkg/ruler/compat.go | 8 ++++---- pkg/ruler/evaluator_jitter.go | 4 ++-- pkg/ruler/evaluator_remote.go | 4 ++-- pkg/util/hash_fp.go | 13 +++++++++++- 14 files changed, 75 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65c97f3d8dee5..13136a5b41cb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ##### Enhancements +* [11243](https://github.com/grafana/loki/pull/11243) **kavirajk**: Inflight-logging: Add extra metadata to inflight requests logging. * [11110](https://github.com/grafana/loki/pull/11003) **MichelHollands**: Change the default of the `metrics-namespace` flag to 'loki'. * [11086](https://github.com/grafana/loki/pull/11086) **kandrew5**: Helm: Allow topologySpreadConstraints * [11003](https://github.com/grafana/loki/pull/11003) **MichelHollands**: Add the `metrics-namespace` flag to change the namespace of metrics currently using cortex as namespace. 
diff --git a/Makefile b/Makefile index b1cacb1135333..ee022ba2129f0 100644 --- a/Makefile +++ b/Makefile @@ -42,6 +42,8 @@ BUILD_IMAGE_VERSION ?= 0.31.2 # Docker image info IMAGE_PREFIX ?= grafana +BUILD_IMAGE_PREFIX ?= grafana + IMAGE_TAG ?= $(shell ./tools/image-tag) # Version info for binaries @@ -102,7 +104,7 @@ RM := --rm TTY := --tty DOCKER_BUILDKIT ?= 1 -BUILD_IMAGE = BUILD_IMAGE=$(IMAGE_PREFIX)/loki-build-image:$(BUILD_IMAGE_VERSION) +BUILD_IMAGE = BUILD_IMAGE=$(BUILD_IMAGE_PREFIX)/loki-build-image:$(BUILD_IMAGE_VERSION) PUSH_OCI=docker push TAG_OCI=docker tag ifeq ($(CI), true) diff --git a/pkg/logql/blocker.go b/pkg/logql/blocker.go index d38a640456c30..cbfdc6bf49e3b 100644 --- a/pkg/logql/blocker.go +++ b/pkg/logql/blocker.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/regexp" + "github.com/grafana/loki/pkg/util" logutil "github.com/grafana/loki/pkg/util/log" "github.com/grafana/loki/pkg/util/validation" ) @@ -43,7 +44,7 @@ func (qb *queryBlocker) isBlocked(ctx context.Context, tenant string) bool { for _, b := range blocks { if b.Hash > 0 { - if b.Hash == HashedQuery(query) { + if b.Hash == util.HashedQuery(query) { level.Warn(logger).Log("msg", "query blocker matched with hash policy", "hash", b.Hash, "query", query) return qb.block(b, typ, logger) } diff --git a/pkg/logql/blocker_test.go b/pkg/logql/blocker_test.go index 3dc3b72c81599..e0dc00bf622e7 100644 --- a/pkg/logql/blocker_test.go +++ b/pkg/logql/blocker_test.go @@ -12,6 +12,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/validation" ) @@ -124,7 +125,7 @@ func TestEngine_ExecWithBlockedQueries(t *testing.T) { "correct FNV32 hash matches", defaultQuery, []*validation.BlockedQuery{ { - Hash: HashedQuery(defaultQuery), + Hash: util.HashedQuery(defaultQuery), }, }, logqlmodel.ErrBlocked, }, @@ -132,7 +133,7 @@ func TestEngine_ExecWithBlockedQueries(t 
*testing.T) { "incorrect FNV32 hash does not match", defaultQuery, []*validation.BlockedQuery{ { - Hash: HashedQuery(defaultQuery) + 1, + Hash: util.HashedQuery(defaultQuery) + 1, }, }, nil, }, diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index 1b85ea05ea760..af680a33b9a97 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -219,7 +219,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { ) if q.logExecQuery { - queryHash := HashedQuery(q.params.Query()) + queryHash := util.HashedQuery(q.params.Query()) if GetRangeType(q.params) == InstantType { level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "instant", "query", q.params.Query(), "query_hash", queryHash) } else { diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index ef7d5e0538e3d..548400644a31e 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -2669,7 +2669,7 @@ func TestHashingStability(t *testing.T) { {`sum (count_over_time({app="myapp",env="myenv"} |= "error" |= "metrics.go" | logfmt [10s])) by(query_hash)`}, } { params.qs = test.qs - expectedQueryHash := HashedQuery(test.qs) + expectedQueryHash := util.HashedQuery(test.qs) // check that both places will end up having the same query hash, even though they're emitting different log lines. 
require.Regexp(t, diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go index 3ba3a9c61535d..94a4c2f9dd408 100644 --- a/pkg/logql/metrics.go +++ b/pkg/logql/metrics.go @@ -2,7 +2,6 @@ package logql import ( "context" - "hash/fnv" "strconv" "strings" "time" @@ -19,6 +18,7 @@ import ( "github.com/grafana/loki/pkg/logqlmodel" logql_stats "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/querier/astmapper" + "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/constants" "github.com/grafana/loki/pkg/util/httpreq" util_log "github.com/grafana/loki/pkg/util/log" @@ -120,7 +120,7 @@ func RecordRangeAndInstantQueryMetrics( logValues = append(logValues, []interface{}{ "latency", latencyType, // this can be used to filter log lines. "query", p.Query(), - "query_hash", HashedQuery(p.Query()), + "query_hash", util.HashedQuery(p.Query()), "query_type", queryType, "range_type", rt, "length", p.End().Sub(p.Start()), @@ -187,12 +187,6 @@ func RecordRangeAndInstantQueryMetrics( recordUsageStats(queryType, stats) } -func HashedQuery(query string) uint32 { - h := fnv.New32() - _, _ = h.Write([]byte(query)) - return h.Sum32() -} - func RecordLabelQueryMetrics( ctx context.Context, log log.Logger, @@ -225,7 +219,7 @@ func RecordLabelQueryMetrics( "status", status, "label", label, "query", query, - "query_hash", HashedQuery(query), + "query_hash", util.HashedQuery(query), "total_entries", stats.Summary.TotalEntriesReturned, ) @@ -276,7 +270,7 @@ func RecordSeriesQueryMetrics(ctx context.Context, log log.Logger, start, end ti "duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))), "status", status, "match", PrintMatches(match), - "query_hash", HashedQuery(PrintMatches(match)), + "query_hash", util.HashedQuery(PrintMatches(match)), "total_entries", stats.Summary.TotalEntriesReturned) if shard != nil { @@ -316,7 +310,7 @@ func RecordStatsQueryMetrics(ctx context.Context, log log.Logger, start, end tim "duration", 
time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))), "status", status, "query", query, - "query_hash", HashedQuery(query), + "query_hash", util.HashedQuery(query), "total_entries", stats.Summary.TotalEntriesReturned) level.Info(logger).Log(logValues...) @@ -346,7 +340,7 @@ func RecordVolumeQueryMetrics(ctx context.Context, log log.Logger, start, end ti "latency", latencyType, "query_type", queryType, "query", query, - "query_hash", HashedQuery(query), + "query_hash", util.HashedQuery(query), "start", start.Format(time.RFC3339Nano), "end", end.Format(time.RFC3339Nano), "start_delta", time.Since(start), diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go index 950a16bb39a73..06d4e2699494e 100644 --- a/pkg/logql/metrics_test.go +++ b/pkg/logql/metrics_test.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/httpreq" util_log "github.com/grafana/loki/pkg/util/log" ) @@ -191,11 +192,11 @@ func Test_testToKeyValues(t *testing.T) { } func TestQueryHashing(t *testing.T) { - h1 := HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`) - h2 := HashedQuery(`{app="myapp",env="myenv"} |= "error" |= logfmt |= "metrics.go"`) + h1 := util.HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`) + h2 := util.HashedQuery(`{app="myapp",env="myenv"} |= "error" |= logfmt |= "metrics.go"`) // check that it capture differences of order. require.NotEqual(t, h1, h2) - h3 := HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`) + h3 := util.HashedQuery(`{app="myapp",env="myenv"} |= "error" |= "metrics.go" |= logfmt`) // check that it evaluate same queries as same hashes, even if evaluated at different timestamps. 
require.Equal(t, h1, h3) } diff --git a/pkg/querier/queryrange/queryrangebase/retry.go b/pkg/querier/queryrange/queryrangebase/retry.go index 5dbad8d82582a..d051363771bb9 100644 --- a/pkg/querier/queryrange/queryrangebase/retry.go +++ b/pkg/querier/queryrange/queryrangebase/retry.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/grafana/loki/pkg/util" util_log "github.com/grafana/loki/pkg/util/log" ) @@ -73,6 +74,11 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { MaxRetries: 0, } bk := backoff.New(ctx, cfg) + + start := req.GetStart() + end := req.GetEnd() + query := req.GetQuery() + for ; tries < r.maxRetries; tries++ { if ctx.Err() != nil { return nil, ctx.Err() @@ -86,7 +92,19 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { httpResp, ok := httpgrpc.HTTPResponseFromError(err) if !ok || httpResp.Code/100 == 5 { lastErr = err - level.Error(util_log.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "query", req.GetQuery(), "retry_in", bk.NextDelay(), "err", err) + level.Error(util_log.WithContext(ctx, r.log)).Log( + "msg", "error processing request", + "try", tries, + "query", query, + "query_hash", util.HashedQuery(query), + "start", start.Format(time.RFC3339Nano), + "end", end.Format(time.RFC3339Nano), + "start_delta", time.Since(start), + "end_delta", time.Since(end), + "length", end.Sub(start), + "retry_in", bk.NextDelay(), + "err", err, + ) bk.Wait() continue } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index e2a2ed0021690..2b24ab4a917dc 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -24,6 +24,7 @@ import ( base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/config" + 
"github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/constants" logutil "github.com/grafana/loki/pkg/util/log" ) @@ -247,8 +248,19 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - queryHash := logql.HashedQuery(op.Query) - level.Info(logger).Log("msg", "executing query", "type", "range", "query", op.Query, "length", op.EndTs.Sub(op.StartTs), "step", op.Step, "query_hash", queryHash) + queryHash := util.HashedQuery(op.Query) + level.Info(logger).Log( + "msg", "executing query", + "type", "range", + "query", op.Query, + "start", op.StartTs.Format(time.RFC3339Nano), + "end", op.EndTs.Format(time.RFC3339Nano), + "start_delta", time.Since(op.StartTs), + "end_delta", time.Since(op.EndTs), + "length", op.EndTs.Sub(op.StartTs), + "step", op.Step, + "query_hash", queryHash, + ) switch e := expr.(type) { case syntax.SampleExpr: @@ -296,7 +308,7 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - queryHash := logql.HashedQuery(op.Query) + queryHash := util.HashedQuery(op.Query) level.Info(logger).Log("msg", "executing query", "type", "instant", "query", op.Query, "query_hash", queryHash) switch expr.(type) { diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index db6316e9986d0..8f70d314da884 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -24,11 +24,11 @@ import ( "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/template" - "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" ruler "github.com/grafana/loki/pkg/ruler/base" "github.com/grafana/loki/pkg/ruler/rulespb" - "github.com/grafana/loki/pkg/ruler/util" + rulerutil "github.com/grafana/loki/pkg/ruler/util" + "github.com/grafana/loki/pkg/util" ) // RulesLimits is the one function we need from limits.Overrides, and @@ -40,7 +40,7 @@ type 
RulesLimits interface { RulerRemoteWriteURL(userID string) string RulerRemoteWriteTimeout(userID string) time.Duration RulerRemoteWriteHeaders(userID string) map[string]string - RulerRemoteWriteRelabelConfigs(userID string) []*util.RelabelConfig + RulerRemoteWriteRelabelConfigs(userID string) []*rulerutil.RelabelConfig RulerRemoteWriteConfig(userID string, id string) *config.RemoteWriteConfig RulerRemoteWriteQueueCapacity(userID string) int RulerRemoteWriteQueueMinShards(userID string) int @@ -60,7 +60,7 @@ type RulesLimits interface { // and passing an altered timestamp. func queryFunc(evaluator Evaluator, checker readyChecker, userID string, logger log.Logger) rules.QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - hash := logql.HashedQuery(qs) + hash := util.HashedQuery(qs) detail := rules.FromOriginContext(ctx) detailLog := log.With(logger, "rule_name", detail.Name, "rule_type", detail.Kind, "query", qs, "query_hash", hash) diff --git a/pkg/ruler/evaluator_jitter.go b/pkg/ruler/evaluator_jitter.go index ef337c73396be..449ca0e18011c 100644 --- a/pkg/ruler/evaluator_jitter.go +++ b/pkg/ruler/evaluator_jitter.go @@ -10,8 +10,8 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/util" ) // EvaluatorWithJitter wraps a given Evaluator. 
It applies a consistent jitter based on a rule's query string by hashing @@ -44,7 +44,7 @@ func NewEvaluatorWithJitter(inner Evaluator, maxJitter time.Duration, hasher has } func (e *EvaluatorWithJitter) Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error) { - logger := log.With(e.logger, "query", qs, "query_hash", logql.HashedQuery(qs)) + logger := log.With(e.logger, "query", qs, "query_hash", util.HashedQuery(qs)) jitter := e.calculateJitter(qs, logger) if jitter > 0 { diff --git a/pkg/ruler/evaluator_remote.go b/pkg/ruler/evaluator_remote.go index 4f953876d6c0f..97a0c1ce7f9dd 100644 --- a/pkg/ruler/evaluator_remote.go +++ b/pkg/ruler/evaluator_remote.go @@ -36,8 +36,8 @@ import ( "google.golang.org/grpc/keepalive" "github.com/grafana/loki/pkg/loghttp" - "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/build" "github.com/grafana/loki/pkg/util/constants" "github.com/grafana/loki/pkg/util/httpreq" @@ -220,7 +220,7 @@ func (r *RemoteEvaluator) query(ctx context.Context, orgID, query string, ts tim args.Set("time", ts.Format(time.RFC3339Nano)) } body := []byte(args.Encode()) - hash := logql.HashedQuery(query) + hash := util.HashedQuery(query) req := httpgrpc.HTTPRequest{ Method: http.MethodPost, diff --git a/pkg/util/hash_fp.go b/pkg/util/hash_fp.go index 209b8b45c0646..e7c0253865b65 100644 --- a/pkg/util/hash_fp.go +++ b/pkg/util/hash_fp.go @@ -1,6 +1,10 @@ package util -import "github.com/prometheus/common/model" +import ( + "hash/fnv" + + "github.com/prometheus/common/model" +) // HashFP simply moves entropy from the most significant 48 bits of the // fingerprint into the least significant 16 bits (by XORing) so that a simple @@ -12,3 +16,10 @@ import "github.com/prometheus/common/model" func HashFP(fp model.Fingerprint) uint32 { return uint32(fp ^ (fp >> 32) ^ (fp >> 16)) } + +// HashedQuery returns a unique hash value for the given 
`query`. +func HashedQuery(query string) uint32 { + h := fnv.New32() + _, _ = h.Write([]byte(query)) + return h.Sum32() +} From 0c10f28a4601a78780de8b2e081dc52aed88b4eb Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Mon, 20 Nov 2023 12:27:27 +0100 Subject: [PATCH 17/48] operator: Adds new value v13 to schema (#10932) Co-authored-by: Periklis Tsirakidis --- operator/CHANGELOG.md | 1 + operator/apis/loki/v1/lokistack_types.go | 7 +- .../loki-operator.clusterserviceversion.yaml | 1 + .../loki.grafana.com_lokistacks.yaml | 2 + .../loki-operator.clusterserviceversion.yaml | 1 + .../loki.grafana.com_lokistacks.yaml | 2 + .../loki-operator.clusterserviceversion.yaml | 1 + .../loki.grafana.com_lokistacks.yaml | 2 + .../bases/loki.grafana.com_lokistacks.yaml | 2 + .../loki-operator.clusterserviceversion.yaml | 1 + .../loki-operator.clusterserviceversion.yaml | 1 + .../loki-operator.clusterserviceversion.yaml | 1 + operator/docs/operator/api.md | 3 + operator/hack/lokistack_dev.yaml | 4 +- operator/hack/lokistack_gateway_dev.yaml | 4 +- operator/hack/lokistack_gateway_ocp.yaml | 4 +- operator/internal/manifests/config.go | 16 + operator/internal/manifests/config_test.go | 86 ++++ .../manifests/internal/config/build_test.go | 471 +++++++++++++++++- .../internal/config/loki-config.yaml | 39 +- .../manifests/internal/config/options.go | 1 + 21 files changed, 616 insertions(+), 34 deletions(-) diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 0ad4b10eada0d..86b79e7d55911 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [10932](https://github.com/grafana/loki/pull/10932) **JoaoBraveCoding**: Adds new value v13 to schema - [11232](https://github.com/grafana/loki/pull/11232) **periklis**: Update dependencies and dev tools - [11129](https://github.com/grafana/loki/pull/11129) **periklis**: Update deps to secure webhooks for CVE-2023-44487 diff --git a/operator/apis/loki/v1/lokistack_types.go 
b/operator/apis/loki/v1/lokistack_types.go index 1f5d9a904027e..382b6f1795d85 100644 --- a/operator/apis/loki/v1/lokistack_types.go +++ b/operator/apis/loki/v1/lokistack_types.go @@ -532,7 +532,7 @@ type ObjectStorageSecretSpec struct { // ObjectStorageSchemaVersion defines the storage schema version which will be // used with the Loki cluster. // -// +kubebuilder:validation:Enum=v11;v12 +// +kubebuilder:validation:Enum=v11;v12;v13 type ObjectStorageSchemaVersion string const ( @@ -541,6 +541,9 @@ const ( // ObjectStorageSchemaV12 when using v12 for the storage schema ObjectStorageSchemaV12 ObjectStorageSchemaVersion = "v12" + + // ObjectStorageSchemaV13 when using v13 for the storage schema + ObjectStorageSchemaV13 ObjectStorageSchemaVersion = "v13" ) // ObjectStorageSchema defines the requirements needed to configure a new @@ -550,7 +553,7 @@ type ObjectStorageSchema struct { // // +required // +kubebuilder:validation:Required - // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:v11","urn:alm:descriptor:com.tectonic.ui:select:v12"},displayName="Version" + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:v11","urn:alm:descriptor:com.tectonic.ui:select:v12","urn:alm:descriptor:com.tectonic.ui:select:v13"},displayName="Version" Version ObjectStorageSchemaVersion `json:"version"` // EffectiveDate is the date in UTC that the schema will be applied on. 
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index 205a55b57684d..8a315a261bd09 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -544,6 +544,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:v11 - urn:alm:descriptor:com.tectonic.ui:select:v12 + - urn:alm:descriptor:com.tectonic.ui:select:v13 - description: Name of a secret in the namespace configured for object storage secrets. displayName: Object Storage Secret Name diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml index 1a4120613e358..11d653fdd332f 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -545,6 +545,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate @@ -4032,6 +4033,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index 53c2779062096..08b6919daab9a 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -544,6 +544,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:v11 - urn:alm:descriptor:com.tectonic.ui:select:v12 + - urn:alm:descriptor:com.tectonic.ui:select:v13 - description: Name of a secret in the namespace configured for object storage secrets. 
displayName: Object Storage Secret Name diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml index 3c0f8321ebe60..5e77c9ba4f1ac 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml @@ -545,6 +545,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate @@ -4032,6 +4033,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index 546119d928f6a..f9828c5051644 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -557,6 +557,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:v11 - urn:alm:descriptor:com.tectonic.ui:select:v12 + - urn:alm:descriptor:com.tectonic.ui:select:v13 - description: Name of a secret in the namespace configured for object storage secrets. 
displayName: Object Storage Secret Name diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml index c34ebd59c8fa8..def6c0ed0777d 100644 --- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -545,6 +545,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate @@ -4032,6 +4033,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml index 1acdaa2418eb3..56c33d835cd7b 100644 --- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml +++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml @@ -527,6 +527,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate @@ -4014,6 +4015,7 @@ spec: enum: - v11 - v12 + - v13 type: string required: - effectiveDate diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml index 91dcd4100b59e..71af0623e6223 100644 --- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml @@ -457,6 +457,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:v11 - urn:alm:descriptor:com.tectonic.ui:select:v12 + - urn:alm:descriptor:com.tectonic.ui:select:v13 - description: Name of a secret in the namespace configured for object storage secrets. 
displayName: Object Storage Secret Name diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml index a41a17cba4608..a3ffabdea5e83 100644 --- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml @@ -457,6 +457,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:v11 - urn:alm:descriptor:com.tectonic.ui:select:v12 + - urn:alm:descriptor:com.tectonic.ui:select:v13 - description: Name of a secret in the namespace configured for object storage secrets. displayName: Object Storage Secret Name diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml index b63141b8ebf3d..ab3143fc2b60e 100644 --- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml @@ -469,6 +469,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:v11 - urn:alm:descriptor:com.tectonic.ui:select:v12 + - urn:alm:descriptor:com.tectonic.ui:select:v13 - description: Name of a secret in the namespace configured for object storage secrets. displayName: Object Storage Secret Name diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md index 8f3501a1cf47e..20e4b0d6adc2a 100644 --- a/operator/docs/operator/api.md +++ b/operator/docs/operator/api.md @@ -2484,6 +2484,9 @@ used with the Loki cluster.

"v12"

ObjectStorageSchemaV12 when using v12 for the storage schema

+

"v13"

+

ObjectStorageSchemaV13 when using v13 for the storage schema

+ diff --git a/operator/hack/lokistack_dev.yaml b/operator/hack/lokistack_dev.yaml index 573bba382416b..c05bbede47452 100644 --- a/operator/hack/lokistack_dev.yaml +++ b/operator/hack/lokistack_dev.yaml @@ -6,8 +6,8 @@ spec: size: 1x.demo storage: schemas: - - version: v12 - effectiveDate: 2022-06-01 + - version: v13 + effectiveDate: 2023-10-15 secret: name: test type: s3 diff --git a/operator/hack/lokistack_gateway_dev.yaml b/operator/hack/lokistack_gateway_dev.yaml index c7f81d1f50e65..0cbe605c4b4ea 100644 --- a/operator/hack/lokistack_gateway_dev.yaml +++ b/operator/hack/lokistack_gateway_dev.yaml @@ -14,8 +14,8 @@ spec: size: 1x.demo storage: schemas: - - version: v12 - effectiveDate: 2022-06-01 + - version: v13 + effectiveDate: 2023-10-15 secret: name: test type: s3 diff --git a/operator/hack/lokistack_gateway_ocp.yaml b/operator/hack/lokistack_gateway_ocp.yaml index 723009c0a5eec..5fb6b3cc3efb6 100644 --- a/operator/hack/lokistack_gateway_ocp.yaml +++ b/operator/hack/lokistack_gateway_ocp.yaml @@ -7,8 +7,8 @@ spec: size: 1x.demo storage: schemas: - - version: v12 - effectiveDate: 2022-06-01 + - version: v13 + effectiveDate: 2023-10-15 secret: name: test type: s3 diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go index e3aed82610eee..33608b83994e6 100644 --- a/operator/internal/manifests/config.go +++ b/operator/internal/manifests/config.go @@ -117,6 +117,21 @@ func ConfigOptions(opt Options) config.Options { opt.Stack.Replication.Factor = opt.Stack.ReplicationFactor } + // Build a slice of with the shippers that are being used in the config + // booleans used to prevent duplicates + shippers := []string{} + boltdb := false + tsdb := false + for _, schema := range opt.Stack.Storage.Schemas { + if !boltdb && (schema.Version == lokiv1.ObjectStorageSchemaV11 || schema.Version == lokiv1.ObjectStorageSchemaV12) { + shippers = append(shippers, "boltdb") + boltdb = true + } else if !tsdb { + shippers = append(shippers, "tsdb") 
+ tsdb = true + } + } + return config.Options{ Stack: opt.Stack, Gates: opt.Gates, @@ -175,6 +190,7 @@ func ConfigOptions(opt Options) config.Options { Directory: walDirectory, IngesterMemoryRequest: opt.ResourceRequirements.Ingester.Requests.Memory().Value(), }, + Shippers: shippers, ObjectStorage: opt.ObjectStorage, HTTPTimeouts: opt.Timeouts.Loki, EnableRemoteReporting: opt.Gates.GrafanaLabsUsageReport, diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go index 268c83853b20e..5df65eb709cc5 100644 --- a/operator/internal/manifests/config_test.go +++ b/operator/internal/manifests/config_test.go @@ -1323,3 +1323,89 @@ func TestConfigOptions_ServerOptions(t *testing.T) { require.Equal(t, want, got.HTTPTimeouts) } + +func TestConfigOptions_Shipper(t *testing.T) { + for _, tc := range []struct { + name string + inOpt Options + wantShipper []string + }{ + { + name: "default_config_v11_schema", + inOpt: Options{ + Stack: lokiv1.LokiStackSpec{ + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-01", + }, + }, + }, + }, + }, + wantShipper: []string{"boltdb"}, + }, + { + name: "v12_schema", + inOpt: Options{ + Stack: lokiv1.LokiStackSpec{ + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV12, + EffectiveDate: "2020-02-05", + }, + }, + }, + }, + }, + wantShipper: []string{"boltdb"}, + }, + { + name: "v13_schema", + inOpt: Options{ + Stack: lokiv1.LokiStackSpec{ + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV13, + EffectiveDate: "2024-01-01", + }, + }, + }, + }, + }, + wantShipper: []string{"tsdb"}, + }, + { + name: "multiple_schema", + inOpt: Options{ + Stack: lokiv1.LokiStackSpec{ + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: 
lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-01-01", + }, + { + Version: lokiv1.ObjectStorageSchemaV12, + EffectiveDate: "2021-01-01", + }, + { + Version: lokiv1.ObjectStorageSchemaV13, + EffectiveDate: "2024-01-01", + }, + }, + }, + }, + }, + wantShipper: []string{"boltdb", "tsdb"}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + got := ConfigOptions(tc.inOpt) + require.Equal(t, tc.wantShipper, got.Shippers) + }) + } +} diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go index 2972b15377950..8b8ec4a2de156 100644 --- a/operator/internal/manifests/internal/config/build_test.go +++ b/operator/internal/manifests/internal/config/build_test.go @@ -1,6 +1,7 @@ package config import ( + "strings" "testing" "time" @@ -43,7 +44,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -102,6 +103,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -110,6 +112,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -251,6 +254,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -295,7 +299,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 
log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -354,6 +358,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -362,6 +367,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -534,6 +540,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, ReadTimeout: 30 * time.Second, @@ -618,6 +625,7 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) { }, }, }, + Shippers: []string{"boltdb"}, } cfg, rCfg, err := Build(opts) require.Error(t, err) @@ -656,7 +664,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -715,6 +723,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -723,6 +732,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -965,6 +975,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -1009,7 +1020,7 @@ compactor: frontend: tail_proxy_url: 
http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -1068,6 +1079,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -1076,6 +1088,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -1319,6 +1332,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -1363,7 +1377,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -1422,6 +1436,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -1430,6 +1445,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -1703,6 +1719,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -1750,7 +1767,7 @@ compactor: frontend: tail_proxy_url: 
http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -1809,6 +1826,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -1822,6 +1840,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -2029,6 +2048,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, Retention: RetentionOptions{ Enabled: true, DeleteWorkerCount: 50, @@ -2076,7 +2096,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -2135,6 +2155,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -2143,6 +2164,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 2m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -2446,6 +2468,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -2497,7 +2520,7 @@ frontend: tls_cipher_suites: cipher1,cipher2 tls_min_version: VersionTLS12 compress_responses: true - 
max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -2570,6 +2593,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -2578,6 +2602,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -2787,6 +2812,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -2831,7 +2857,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -2890,6 +2916,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -2898,6 +2925,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 2m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -3278,6 +3306,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -3323,7 +3352,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + 
max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -3382,6 +3411,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -3390,6 +3420,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_addr: ${HASH_RING_INSTANCE_ADDR} @@ -3533,6 +3564,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -3578,7 +3610,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -3638,6 +3670,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -3646,6 +3679,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_addr: ${HASH_RING_INSTANCE_ADDR} @@ -3790,6 +3824,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -3836,7 +3871,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 
+ max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -3895,6 +3930,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -3903,6 +3939,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -4045,6 +4082,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, EnableRemoteReporting: true, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, @@ -4094,7 +4132,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -4153,6 +4191,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -4161,6 +4200,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -4338,6 +4378,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, ReadTimeout: 30 * time.Second, @@ -4383,7 +4424,7 @@ compactor: frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 
log_queries_longer_than: 5s frontend_worker: frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 @@ -4442,6 +4483,7 @@ limits_config: max_chunks_per_query: 2000000 max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: 500 cardinality_limit: 100000 max_streams_matchers_per_query: 1000 @@ -4450,6 +4492,7 @@ limits_config: per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true advertise_port: 7946 @@ -4627,6 +4670,7 @@ overrides: }, }, }, + Shippers: []string{"boltdb"}, HTTPTimeouts: HTTPTimeoutConfig{ IdleTimeout: 30 * time.Second, ReadTimeout: 30 * time.Second, @@ -4638,3 +4682,402 @@ overrides: require.YAMLEq(t, expCfg, string(cfg)) require.YAMLEq(t, expRCfg, string(rCfg)) } + +func defaultOptions() Options { + return Options{ + Stack: lokiv1.LokiStackSpec{ + Replication: &lokiv1.ReplicationSpec{ + Factor: 1, + }, + Limits: &lokiv1.LimitsSpec{ + Global: &lokiv1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1.IngestionLimitSpec{ + IngestionRate: 4, + IngestionBurstSize: 6, + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxGlobalStreamsPerTenant: 0, + MaxLineSize: 256000, + PerStreamRateLimit: 3, + PerStreamRateLimitBurst: 15, + }, + QueryLimits: &lokiv1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + QueryTimeout: "1m", + CardinalityLimit: 100000, + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + Compactor: Address{ + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + FrontendWorker: Address{ + FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + GossipRing: GossipRing{ + InstancePort: 9095, + BindPort: 7946, + MembersDiscoveryAddr: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local", 
+ }, + Querier: Address{ + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, + }, + IndexGateway: Address{ + FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + StorageDirectory: "/tmp/loki", + MaxConcurrent: MaxConcurrent{ + AvailableQuerierCPUCores: 2, + }, + WriteAheadLog: WriteAheadLog{ + Directory: "/tmp/wal", + IngesterMemoryRequest: 5000, + }, + ObjectStorage: storage.Options{ + SharedStore: lokiv1.ObjectStorageSecretS3, + S3: &storage.S3StorageConfig{ + Endpoint: "http://test.default.svc.cluster.local.:9000", + Region: "us-east", + Buckets: "loki", + AccessKeyID: "test", + AccessKeySecret: "test123", + }, + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-01", + }, + }, + }, + Shippers: []string{"boltdb"}, + EnableRemoteReporting: true, + HTTPTimeouts: HTTPTimeoutConfig{ + IdleTimeout: 30 * time.Second, + ReadTimeout: 30 * time.Second, + WriteTimeout: 10 * time.Minute, + }, + } +} + +func TestBuild_ConfigAndRuntimeConfig_Schemas(t *testing.T) { + for _, tc := range []struct { + name string + schemaConfig []lokiv1.ObjectStorageSchema + shippers []string + expSchemaConfig string + expStorageConfig string + }{ + { + name: "default_config_v11_schema", + schemaConfig: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-01", + }, + }, + shippers: []string{"boltdb"}, + expSchemaConfig: ` + configs: + - from: "2020-10-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v11 + store: boltdb-shipper`, + expStorageConfig: ` + boltdb_shipper: + active_index_directory: /tmp/loki/index + cache_location: /tmp/loki/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`, + }, + { + name: "v12_schema", + schemaConfig: 
[]lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV12, + EffectiveDate: "2020-02-05", + }, + }, + shippers: []string{"boltdb"}, + expSchemaConfig: ` + configs: + - from: "2020-02-05" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v12 + store: boltdb-shipper`, + expStorageConfig: ` + boltdb_shipper: + active_index_directory: /tmp/loki/index + cache_location: /tmp/loki/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`, + }, + { + name: "v13_schema", + schemaConfig: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV13, + EffectiveDate: "2024-01-01", + }, + }, + shippers: []string{"tsdb"}, + expSchemaConfig: ` + configs: + - from: "2024-01-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v13 + store: tsdb`, + expStorageConfig: ` + tsdb_shipper: + active_index_directory: /tmp/loki/tsdb-index + cache_location: /tmp/loki/tsdb-cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`, + }, + { + name: "multiple_schema", + schemaConfig: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-01-01", + }, + { + Version: lokiv1.ObjectStorageSchemaV12, + EffectiveDate: "2021-01-01", + }, + { + Version: lokiv1.ObjectStorageSchemaV13, + EffectiveDate: "2024-01-01", + }, + }, + shippers: []string{"boltdb", "tsdb"}, + expSchemaConfig: ` + configs: + - from: "2020-01-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v11 + store: boltdb-shipper + - from: "2021-01-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v12 + store: boltdb-shipper + - from: "2024-01-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v13 + store: tsdb`, 
+ expStorageConfig: ` + boltdb_shipper: + active_index_directory: /tmp/loki/index + cache_location: /tmp/loki/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095 + tsdb_shipper: + active_index_directory: /tmp/loki/tsdb-index + cache_location: /tmp/loki/tsdb-cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + expCfg := ` +--- +auth_enabled: true +chunk_store_config: + chunk_cache_config: + embedded_cache: + enabled: true + max_size_mb: 500 +common: + storage: + s3: + s3: http://test.default.svc.cluster.local.:9000 + bucketnames: loki + region: us-east + access_key_id: test + secret_access_key: test123 + s3forcepathstyle: true + compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 + ring: + kvstore: + store: memberlist + heartbeat_period: 5s + heartbeat_timeout: 1m + instance_port: 9095 +compactor: + compaction_interval: 2h + working_directory: /tmp/loki/compactor +frontend: + tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 + compress_responses: true + max_outstanding_per_tenant: 4096 + log_queries_longer_than: 5s +frontend_worker: + frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 + grpc_client_config: + max_send_msg_size: 104857600 + match_max_concurrent: true +ingester: + chunk_block_size: 262144 + chunk_encoding: snappy + chunk_idle_period: 1h + chunk_retain_period: 5m + chunk_target_size: 2097152 + flush_op_timeout: 10m + lifecycler: + final_sleep: 0s + join_after: 30s + num_tokens: 512 + ring: + replication_factor: 1 + max_chunk_age: 2h + max_transfer_retries: 0 + wal: + enabled: true + dir: /tmp/wal + replay_memory_ceiling: 2500 
+ingester_client: + grpc_client_config: + max_recv_msg_size: 67108864 + remote_timeout: 1s +# NOTE: Keep the order of keys as in Loki docs +# to enable easy diffs when vendoring newer +# Loki releases. +# (See https://grafana.com/docs/loki/latest/configuration/#limits_config) +# +# Values for not exposed fields are taken from the grafana/loki production +# configuration manifests. +# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet) +limits_config: + ingestion_rate_strategy: global + ingestion_rate_mb: 4 + ingestion_burst_size_mb: 6 + max_label_name_length: 1024 + max_label_value_length: 2048 + max_label_names_per_series: 30 + reject_old_samples: true + reject_old_samples_max_age: 168h + creation_grace_period: 10m + enforce_metric_name: false + # Keep max_streams_per_user always to 0 to default + # using max_global_streams_per_user always. + # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73) + max_streams_per_user: 0 + max_line_size: 256000 + max_entries_limit_per_query: 5000 + max_global_streams_per_user: 0 + max_chunks_per_query: 2000000 + max_query_length: 721h + max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 + max_query_series: 500 + cardinality_limit: 100000 + max_streams_matchers_per_query: 1000 + max_cache_freshness_per_query: 10m + per_stream_rate_limit: 3MB + per_stream_rate_limit_burst: 15MB + split_queries_by_interval: 30m + query_timeout: 1m + allow_structured_metadata: true +memberlist: + abort_if_cluster_join_fails: true + advertise_port: 7946 + bind_port: 7946 + join_members: + - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946 + max_join_backoff: 1m + max_join_retries: 10 + min_join_backoff: 1s +querier: + engine: + max_look_back_period: 30s + extra_query_delay: 0s + max_concurrent: 2 + query_ingesters_within: 3h + tail_max_duration: 1h +query_range: + align_queries_with_step: true + cache_results: true + max_retries: 5 + results_cache: + cache: + 
embedded_cache: + enabled: true + max_size_mb: 500 + parallelise_shardable_queries: true +schema_config: +${SCHEMA_CONFIG} +server: + graceful_shutdown_timeout: 5s + grpc_server_min_time_between_pings: '10s' + grpc_server_ping_without_stream_allowed: true + grpc_server_max_concurrent_streams: 1000 + grpc_server_max_recv_msg_size: 104857600 + grpc_server_max_send_msg_size: 104857600 + http_listen_port: 3100 + http_server_idle_timeout: 30s + http_server_read_timeout: 30s + http_server_write_timeout: 10m0s + log_level: info +storage_config: +${STORAGE_CONFIG} +tracing: + enabled: false +analytics: + reporting_enabled: true +` + expCfg = strings.Replace(expCfg, "${SCHEMA_CONFIG}", tc.expSchemaConfig, 1) + expCfg = strings.Replace(expCfg, "${STORAGE_CONFIG}", tc.expStorageConfig, 1) + + opts := defaultOptions() + opts.ObjectStorage.Schemas = tc.schemaConfig + opts.Shippers = tc.shippers + + cfg, _, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expCfg, string(cfg)) + }) + } +} diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml index 0c06a44ed16e4..e192083ab9900 100644 --- a/operator/internal/manifests/internal/config/loki-config.yaml +++ b/operator/internal/manifests/internal/config/loki-config.yaml @@ -104,7 +104,7 @@ frontend: tls_min_version: {{ .TLS.MinTLSVersion }} {{- end }} compress_responses: true - max_outstanding_per_tenant: 256 + max_outstanding_per_tenant: 4096 log_queries_longer_than: 5s frontend_worker: frontend_address: {{ .FrontendWorker.FQDN }}:{{ .FrontendWorker.Port }} @@ -184,6 +184,7 @@ limits_config: max_chunks_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxChunksPerQuery }} max_query_length: 721h max_query_parallelism: 32 + tsdb_max_query_parallelism: 512 max_query_series: {{ .Stack.Limits.Global.QueryLimits.MaxQuerySeries }} cardinality_limit: {{ .Stack.Limits.Global.QueryLimits.CardinalityLimit }} max_streams_matchers_per_query: 1000 @@ 
-203,6 +204,7 @@ limits_config: per_stream_rate_limit: {{ .Stack.Limits.Global.IngestionLimits.PerStreamRateLimit }}MB per_stream_rate_limit_burst: {{ .Stack.Limits.Global.IngestionLimits.PerStreamRateLimitBurst }}MB split_queries_by_interval: 30m + allow_structured_metadata: true {{- with .GossipRing }} memberlist: abort_if_cluster_join_fails: true @@ -254,7 +256,11 @@ schema_config: prefix: index_ object_store: {{ $store }} schema: {{ .Version }} + {{- if or (eq .Version "v11") (eq .Version "v12")}} store: boltdb-shipper + {{- else }} + store: tsdb + {{- end}} {{- end }} {{ if .Ruler.Enabled }} ruler: @@ -468,23 +474,32 @@ server: {{- end }} log_level: info storage_config: +{{- range $_, $ship := .Shippers }} +{{- if eq $ship "boltdb" }} boltdb_shipper: - active_index_directory: {{ .StorageDirectory }}/index - cache_location: {{ .StorageDirectory }}/index_cache + active_index_directory: {{ $.StorageDirectory }}/index + cache_location: {{ $.StorageDirectory }}/index_cache +{{- end }} +{{- if eq $ship "tsdb" }} + tsdb_shipper: + active_index_directory: {{ $.StorageDirectory }}/tsdb-index + cache_location: {{ $.StorageDirectory }}/tsdb-cache +{{- end }} cache_ttl: 24h resync_interval: 5m - shared_store: {{ .ObjectStorage.SharedStore }} + shared_store: {{ $.ObjectStorage.SharedStore }} index_gateway_client: - server_address: dns:///{{ .IndexGateway.FQDN }}:{{ .IndexGateway.Port }} -{{- if .Gates.GRPCEncryption }} + server_address: dns:///{{ $.IndexGateway.FQDN }}:{{ $.IndexGateway.Port }} +{{- if $.Gates.GRPCEncryption }} grpc_client_config: tls_enabled: true - tls_cert_path: {{ .TLS.Paths.GRPC.Certificate }} - tls_key_path: {{ .TLS.Paths.GRPC.Key }} - tls_ca_path: {{ .TLS.Paths.CA }} - tls_server_name: {{ .TLS.ServerNames.GRPC.IndexGateway }} - tls_cipher_suites: {{ .TLS.CipherSuitesString }} - tls_min_version: {{ .TLS.MinTLSVersion }} + tls_cert_path: {{ $.TLS.Paths.GRPC.Certificate }} + tls_key_path: {{ $.TLS.Paths.GRPC.Key }} + tls_ca_path: {{ $.TLS.Paths.CA }} + 
tls_server_name: {{ $.TLS.ServerNames.GRPC.IndexGateway }} + tls_cipher_suites: {{ $.TLS.CipherSuitesString }} + tls_min_version: {{ $.TLS.MinTLSVersion }} +{{- end }} {{- end }} tracing: enabled: false diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go index d82000034acc7..5829270244322 100644 --- a/operator/internal/manifests/internal/config/options.go +++ b/operator/internal/manifests/internal/config/options.go @@ -29,6 +29,7 @@ type Options struct { MaxConcurrent MaxConcurrent WriteAheadLog WriteAheadLog EnableRemoteReporting bool + Shippers []string ObjectStorage storage.Options From 5535267f2cd2c2a869aa28b05a78fe21dd9af37f Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Mon, 20 Nov 2023 12:55:14 +0100 Subject: [PATCH 18/48] operator: Remove outdated BoltDB dashboards (#11022) Co-authored-by: Periklis Tsirakidis --- operator/CHANGELOG.md | 3 +- .../grafana-dashboard-lokistack-reads.json | 273 ------------------ .../grafana-dashboard-lokistack-writes.json | 188 ------------ operator/jsonnet/config.libsonnet | 7 +- 4 files changed, 7 insertions(+), 464 deletions(-) diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 86b79e7d55911..8ee1f64ed48f5 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,9 +1,10 @@ ## Main +- [11022](https://github.com/grafana/loki/pull/11022) **JoaoBraveCoding**: Remove outdated BoltDB dashboards - [10932](https://github.com/grafana/loki/pull/10932) **JoaoBraveCoding**: Adds new value v13 to schema - [11232](https://github.com/grafana/loki/pull/11232) **periklis**: Update dependencies and dev tools - [11129](https://github.com/grafana/loki/pull/11129) **periklis**: Update deps to secure webhooks for CVE-2023-44487 - +- ## 0.5.0 (2023-10-24) - [10924](https://github.com/grafana/loki/pull/10924) **periklis**: Update Loki operand to v2.9.2 diff --git 
a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json index 09a8198cedf73..c2d89f8b0a877 100644 --- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json +++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json @@ -1113,279 +1113,6 @@ "showTitle": true, "title": "Index", "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - "1xx": "#EAB839", - "2xx": "#7EB26D", - "3xx": "#6ED0E0", - "4xx": "#EF843C", - "5xx": "#E24D42", - "error": "#E24D42", - "success": "#7EB26D" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{status}}", - "refId": "A", - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "QPS", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": 
null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 17, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "99th Percentile", - "refId": "A", - "step": 10 - }, - { - "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "50th Percentile", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Average", 
- "refId": "C", - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Latency", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fieldConfig": { - "custom": { - "fillOpacity": 50, - "showPoints": "never", - "stacking": { - "group": "A", - "mode": "normal" - } - } - }, - "fill": 1, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99,\n sum(\n rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])\n ) by (pod, le)\n )\n", - "instant": false, - "legendFormat": "{{pod}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Per Pod Latency (p99)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": 
true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "BoltDB Shipper", - "titleSize": "h6" } ], "schemaVersion": 14, diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json index d217299772e6b..d920ec075c68b 100644 --- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json +++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json @@ -585,194 +585,6 @@ "showTitle": true, "title": "Index", "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - "1xx": "#EAB839", - "2xx": "#7EB26D", - "3xx": "#6ED0E0", - "4xx": "#EF843C", - "5xx": "#E24D42", - "error": "#E24D42", - "success": "#7EB26D" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n", - "format": "time_series", - 
"intervalFactor": 2, - "legendFormat": "{{status}}", - "refId": "A", - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "QPS", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "99th Percentile", - "refId": "A", - "step": 10 - }, - { - "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "50th Percentile", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-ingester-http\", 
operation=\"WRITE\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Average", - "refId": "C", - "step": 10 - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeShift": null, - "title": "Latency", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "BoltDB Shipper", - "titleSize": "h6" } ], "schemaVersion": 14, diff --git a/operator/jsonnet/config.libsonnet b/operator/jsonnet/config.libsonnet index ec50b795a1de2..7651008e56011 100644 --- a/operator/jsonnet/config.libsonnet +++ b/operator/jsonnet/config.libsonnet @@ -181,7 +181,10 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn }, }, 'loki-reads.json'+: { - local dropList = ['BigTable', 'Ingester - Zone Aware'], + // We drop both BigTable and BlotDB dashboards as they have been + // replaced by the Index dashboards + local dropList = ['BigTable', 'Ingester - Zone Aware', 'BoltDB Shipper'], + uid: '62q5jjYwhVSaz4Mcrm8tV3My3gcKED', title: 'OpenShift Logging / LokiStack / Reads', @@ -220,7 +223,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn }, }, 'loki-writes.json'+: { - local dropList = ['Ingester - Zone Aware'], + local dropList = ['Ingester - Zone Aware', 'BoltDB Shipper'], uid: 'F6nRYKuXmFVpVSFQmXr7cgXy5j7UNr', title: 'OpenShift Logging / LokiStack / Writes', 
tags: defaultLokiTags(super.tags), From 6a62b8cf42f2813cb6d93ec76c39b6d8d75f822b Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Mon, 20 Nov 2023 18:22:03 +0530 Subject: [PATCH 19/48] compaction: Separate metrics for tracking retention and compaction (#11263) **What this PR does / why we need it**: In PR #9884, we separated the retention loop from compaction to avoid blocking compaction for too long due to some intensive delete requests. Currently, we track retention and compaction using the same metrics. This PR adds separate metrics for monitoring retention operation. I have also updated the Retention dashboard to use the new metrics. --- .../loki_micro_services_delete_test.go | 1 + pkg/compactor/compactor.go | 28 +- pkg/compactor/compactor_test.go | 40 +- pkg/compactor/metrics.go | 46 +- .../dashboards/loki-retention.json | 398 +++++++++++++++++- .../dashboards/loki-retention.json | 398 +++++++++++++++++- .../dashboards/loki-retention.libsonnet | 30 +- 7 files changed, 860 insertions(+), 81 deletions(-) diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go index 5cce134d94b2c..07195d919ee1f 100644 --- a/integration/loki_micro_services_delete_test.go +++ b/integration/loki_micro_services_delete_test.go @@ -216,6 +216,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) { validateQueryResponse := func(expectedStreams []client.StreamValues, resp *client.Response) { t.Helper() + assert.Equal(t, "success", resp.Status) assert.Equal(t, "streams", resp.Data.ResultType) require.Len(t, resp.Data.Stream, len(expectedStreams)) diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 64f89fc696b9f..774536152ca9d 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -128,6 +128,11 @@ func (cfg *Config) Validate() error { cfg.ApplyRetentionInterval = cfg.CompactionInterval } + if cfg.ApplyRetentionInterval == cfg.CompactionInterval { + // add some jitter to avoid running 
retention and compaction at same time + cfg.ApplyRetentionInterval += minDuration(10*time.Minute, cfg.ApplyRetentionInterval/2) + } + if err := config.ValidatePathPrefix(cfg.DeleteRequestStoreKeyPrefix); err != nil { return fmt.Errorf("validate delete store path prefix: %w", err) } @@ -604,7 +609,7 @@ func (c *Compactor) CompactTable(ctx context.Context, tableName string, applyRet } if hasUncompactedIndex { - c.metrics.skippedCompactingLockedTables.Inc() + c.metrics.skippedCompactingLockedTables.WithLabelValues(tableName).Inc() level.Warn(util_log.Logger).Log("msg", "skipped compacting table which likely has uncompacted index since it is locked by retention", "table_name", tableName) } return nil @@ -657,14 +662,19 @@ func (c *Compactor) RunCompaction(ctx context.Context, applyRetention bool) (err if err != nil { status = statusFailure } - withRetentionLabelValue := fmt.Sprintf("%v", applyRetention) - c.metrics.compactTablesOperationTotal.WithLabelValues(status, withRetentionLabelValue).Inc() + if applyRetention { + c.metrics.applyRetentionOperationTotal.WithLabelValues(status).Inc() + } else { + c.metrics.compactTablesOperationTotal.WithLabelValues(status).Inc() + } runtime := time.Since(start) if status == statusSuccess { - c.metrics.compactTablesOperationDurationSeconds.WithLabelValues(withRetentionLabelValue).Set(runtime.Seconds()) - c.metrics.compactTablesOperationLastSuccess.WithLabelValues(withRetentionLabelValue).SetToCurrentTime() if applyRetention { + c.metrics.applyRetentionOperationDurationSeconds.Set(runtime.Seconds()) c.metrics.applyRetentionLastSuccess.SetToCurrentTime() + } else { + c.metrics.compactTablesOperationDurationSeconds.Set(runtime.Seconds()) + c.metrics.compactTablesOperationLastSuccess.SetToCurrentTime() } } @@ -874,3 +884,11 @@ func schemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.Per return schemaCfg, true } + +func minDuration(x time.Duration, y time.Duration) time.Duration { + if x < y { + return x + } + + 
return y +} diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 6913956aaa70e..17df040290732 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -348,7 +348,7 @@ func TestCompactor_TableLocking(t *testing.T) { lockTable string applyRetention bool - compactionShouldTimeout bool + retentionShouldTimeout bool }{ { name: "no table locked - not applying retention", @@ -362,10 +362,10 @@ func TestCompactor_TableLocking(t *testing.T) { lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd), }, { - name: "first table locked - applying retention", - lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd), - applyRetention: true, - compactionShouldTimeout: true, + name: "first table locked - applying retention", + lockTable: fmt.Sprintf("%s%d", indexTablePrefix, tableNumEnd), + applyRetention: true, + retentionShouldTimeout: true, }, } { t.Run(tc.name, func(t *testing.T) { @@ -389,30 +389,38 @@ func TestCompactor_TableLocking(t *testing.T) { defer cancel() err := compactor.RunCompaction(ctx, tc.applyRetention) - // compaction should not timeout after first run since we won't be locking the table - if n == 1 && tc.compactionShouldTimeout { + // retention should not timeout after first run since we won't be locking the table + if n == 1 && tc.retentionShouldTimeout { require.ErrorIs(t, err, context.DeadlineExceeded) - require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusFailure, "true"))) - require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusFailure, "false"))) + require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusFailure))) + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusFailure))) return } require.NoError(t, err) - if n > 1 && 
tc.compactionShouldTimeout { - // this should be the first successful run if compaction was expected to be timeout out during first run - require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess, fmt.Sprintf("%v", tc.applyRetention)))) + if n > 1 && tc.applyRetention && tc.retentionShouldTimeout { + // this should be the first successful run if retention was expected to timeout out during first run + require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusSuccess))) } else { // else it should have succeeded during all the n runs - require.Equal(t, float64(n), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess, fmt.Sprintf("%v", tc.applyRetention)))) + if tc.applyRetention { + require.Equal(t, float64(n), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusSuccess))) + } else { + require.Equal(t, float64(n), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess))) + } + } + if tc.applyRetention { + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess))) + } else { + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.applyRetentionOperationTotal.WithLabelValues(statusSuccess))) } - require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.compactTablesOperationTotal.WithLabelValues(statusSuccess, fmt.Sprintf("%v", !tc.applyRetention)))) // if the table was locked and compaction ran without retention then only locked table should have been skipped if tc.lockTable != "" { if tc.applyRetention { - require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables)) + require.Equal(t, float64(0), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables.WithLabelValues(tc.lockTable))) } else { - 
require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables)) + require.Equal(t, float64(1), testutil.ToFloat64(compactor.metrics.skippedCompactingLockedTables.WithLabelValues(tc.lockTable))) } } diff --git a/pkg/compactor/metrics.go b/pkg/compactor/metrics.go index 7cbf404c81848..28b205789693c 100644 --- a/pkg/compactor/metrics.go +++ b/pkg/compactor/metrics.go @@ -8,17 +8,17 @@ import ( const ( statusFailure = "failure" statusSuccess = "success" - - lblWithRetention = "with_retention" ) type metrics struct { - compactTablesOperationTotal *prometheus.CounterVec - compactTablesOperationDurationSeconds *prometheus.GaugeVec - compactTablesOperationLastSuccess *prometheus.GaugeVec - applyRetentionLastSuccess prometheus.Gauge - compactorRunning prometheus.Gauge - skippedCompactingLockedTables prometheus.Counter + compactTablesOperationTotal *prometheus.CounterVec + compactTablesOperationDurationSeconds prometheus.Gauge + compactTablesOperationLastSuccess prometheus.Gauge + applyRetentionOperationTotal *prometheus.CounterVec + applyRetentionOperationDurationSeconds prometheus.Gauge + applyRetentionLastSuccess prometheus.Gauge + compactorRunning prometheus.Gauge + skippedCompactingLockedTables *prometheus.CounterVec } func newMetrics(r prometheus.Registerer) *metrics { @@ -26,18 +26,28 @@ func newMetrics(r prometheus.Registerer) *metrics { compactTablesOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ Namespace: "loki_boltdb_shipper", Name: "compact_tables_operation_total", - Help: "Total number of tables compaction done by status and with/without retention", - }, []string{"status", lblWithRetention}), - compactTablesOperationDurationSeconds: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ + Help: "Total number of tables compaction done by status", + }, []string{"status"}), + compactTablesOperationDurationSeconds: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: "loki_boltdb_shipper", Name: 
"compact_tables_operation_duration_seconds", - Help: "Time (in seconds) spent in compacting all the tables with/without retention", - }, []string{lblWithRetention}), - compactTablesOperationLastSuccess: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{ + Help: "Time (in seconds) spent in compacting all the tables", + }), + compactTablesOperationLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: "loki_boltdb_shipper", Name: "compact_tables_operation_last_successful_run_timestamp_seconds", Help: "Unix timestamp of the last successful compaction run", - }, []string{lblWithRetention}), + }), + applyRetentionOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: "loki_compactor", + Name: "apply_retention_operation_total", + Help: "Total number of attempts done to apply retention with status", + }, []string{"status"}), + applyRetentionOperationDurationSeconds: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: "loki_compactor", + Name: "apply_retention_operation_duration_seconds", + Help: "Time (in seconds) spent in applying retention", + }), applyRetentionLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: "loki_boltdb_shipper", Name: "apply_retention_last_successful_run_timestamp_seconds", @@ -48,11 +58,11 @@ func newMetrics(r prometheus.Registerer) *metrics { Name: "compactor_running", Help: "Value will be 1 if compactor is currently running on this instance", }), - skippedCompactingLockedTables: promauto.With(r).NewCounter(prometheus.CounterOpts{ + skippedCompactingLockedTables: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ Namespace: "loki_compactor", - Name: "skipped_compacting_locked_tables_total", + Name: "skipped_compacting_locked_table_total", Help: "Count of uncompacted tables being skipped due to them being locked by retention", - }), + }, []string{"table_name"}), } return &m diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json 
b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json index 73791bf2b11e1..95bc7b6e0f83b 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json @@ -375,7 +375,7 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 6, "stack": false, "steppedLine": false, "targets": [ @@ -389,7 +389,7 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Last Compact and Mark Operation Success", + "title": "Last Compact Tables Operation Success", "tooltip": { "shared": true, "sort": 2, @@ -449,7 +449,7 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 6, "stack": false, "steppedLine": false, "targets": [ @@ -465,7 +465,7 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Compact and Mark Operations Duration", + "title": "Compact Tables Operations Duration", "tooltip": { "shared": true, "sort": 2, @@ -497,7 +497,19 @@ "show": false } ] - }, + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Compaction", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ { "aliasColors": { }, "bars": false, @@ -525,7 +537,83 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{table_name}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Number of times Tables were skipped during Compaction", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": 
null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 6, "stack": false, "steppedLine": false, "targets": [ @@ -541,7 +629,279 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Compact and Mark Operations Per Status", + "title": "Compact Tables Operations Per Status", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "blue", + "mode": "fixed" + }, + "custom": { }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null 
+ } + ] + }, + "unit": "dateTimeFromNow" + } + }, + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { }, + "textMode": "auto" + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "loki_compactor_apply_retention_last_successful_run_timestamp_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"} * 1e3", + "format": "time_series", + "instant": true, + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Last Mark Operation Success", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "stat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + 
"span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "loki_compactor_apply_retention_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "duration", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Mark Operations Duration", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (status)(rate(loki_compactor_apply_retention_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{success}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Mark Operations Per Status", "tooltip": { "shared": true, "sort": 2, @@ -579,7 +939,7 @@ "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Compact and Mark", + "title": "Retention", "titleSize": "h6" 
}, { @@ -593,7 +953,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 7, + "id": 11, "legend": { "avg": false, "current": false, @@ -669,7 +1029,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 8, + "id": 12, "legend": { "avg": false, "current": false, @@ -745,7 +1105,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 9, + "id": 13, "legend": { "avg": false, "current": false, @@ -834,7 +1194,7 @@ "datasource": "$datasource", "fill": 1, "format": "short", - "id": 10, + "id": 14, "legend": { "avg": false, "current": false, @@ -909,7 +1269,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 11, + "id": 15, "legend": { "avg": false, "current": false, @@ -1014,7 +1374,7 @@ "datasource": "$datasource", "fill": 1, "format": "short", - "id": 12, + "id": 16, "legend": { "avg": false, "current": false, @@ -1089,7 +1449,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 13, + "id": 17, "legend": { "avg": false, "current": false, @@ -1193,7 +1553,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 14, + "id": 18, "legend": { "avg": false, "current": false, @@ -1269,7 +1629,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 15, + "id": 19, "legend": { "avg": false, "current": false, @@ -1345,7 +1705,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 16, + "id": 20, "legend": { "avg": false, "current": false, @@ -1428,7 +1788,7 @@ "panels": [ { "datasource": "$loki_datasource", - "id": 17, + "id": 21, "span": 12, "targets": [ { diff --git a/production/loki-mixin-compiled/dashboards/loki-retention.json b/production/loki-mixin-compiled/dashboards/loki-retention.json index fc8f9e5619757..a266d15734208 100644 --- a/production/loki-mixin-compiled/dashboards/loki-retention.json +++ b/production/loki-mixin-compiled/dashboards/loki-retention.json @@ -375,7 +375,7 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - 
"span": 4, + "span": 6, "stack": false, "steppedLine": false, "targets": [ @@ -389,7 +389,7 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Last Compact and Mark Operation Success", + "title": "Last Compact Tables Operation Success", "tooltip": { "shared": true, "sort": 2, @@ -449,7 +449,7 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 6, "stack": false, "steppedLine": false, "targets": [ @@ -465,7 +465,7 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Compact and Mark Operations Duration", + "title": "Compact Tables Operations Duration", "tooltip": { "shared": true, "sort": 2, @@ -497,7 +497,19 @@ "show": false } ] - }, + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Compaction", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ { "aliasColors": { }, "bars": false, @@ -525,7 +537,83 @@ "renderer": "flot", "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{table_name}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Number of times Tables were skipped during Compaction", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + 
"bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 6, "stack": false, "steppedLine": false, "targets": [ @@ -541,7 +629,279 @@ "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Compact and Mark Operations Per Status", + "title": "Compact Tables Operations Per Status", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "blue", + "mode": "fixed" + }, + "custom": { }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "dateTimeFromNow" + } + }, + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "colorMode": "value", + 
"graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { }, + "textMode": "auto" + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "loki_compactor_apply_retention_last_successful_run_timestamp_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"} * 1e3", + "format": "time_series", + "instant": true, + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Last Mark Operation Success", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "stat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "loki_compactor_apply_retention_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "duration", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + 
"timeFrom": null, + "timeShift": null, + "title": "Mark Operations Duration", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (status)(rate(loki_compactor_apply_retention_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{success}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Mark Operations Per Status", "tooltip": { "shared": true, "sort": 2, @@ -579,7 +939,7 @@ "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Compact and Mark", + "title": "Retention", "titleSize": "h6" }, { @@ -593,7 +953,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 7, + "id": 11, "legend": { "avg": false, "current": false, @@ -669,7 +1029,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 8, + "id": 12, "legend": { "avg": false, "current": false, @@ -745,7 +1105,7 @@ "dashes": false, 
"datasource": "$datasource", "fill": 10, - "id": 9, + "id": 13, "legend": { "avg": false, "current": false, @@ -834,7 +1194,7 @@ "datasource": "$datasource", "fill": 1, "format": "short", - "id": 10, + "id": 14, "legend": { "avg": false, "current": false, @@ -909,7 +1269,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 11, + "id": 15, "legend": { "avg": false, "current": false, @@ -1014,7 +1374,7 @@ "datasource": "$datasource", "fill": 1, "format": "short", - "id": 12, + "id": 16, "legend": { "avg": false, "current": false, @@ -1089,7 +1449,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 13, + "id": 17, "legend": { "avg": false, "current": false, @@ -1193,7 +1553,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 14, + "id": 18, "legend": { "avg": false, "current": false, @@ -1269,7 +1629,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 15, + "id": 19, "legend": { "avg": false, "current": false, @@ -1345,7 +1705,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 16, + "id": 20, "legend": { "avg": false, "current": false, @@ -1428,7 +1788,7 @@ "panels": [ { "datasource": "$loki_datasource", - "id": 17, + "id": 21, "span": 12, "targets": [ { diff --git a/production/loki-mixin/dashboards/loki-retention.libsonnet b/production/loki-mixin/dashboards/loki-retention.libsonnet index 8e28ccdb0e7a7..a5aa45a13d756 100644 --- a/production/loki-mixin/dashboards/loki-retention.libsonnet +++ b/production/loki-mixin/dashboards/loki-retention.libsonnet @@ -25,20 +25,42 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) .addRow( - $.row('Compact and Mark') + $.row('Compaction') .addPanel( - $.fromNowPanel('Last Compact and Mark Operation Success', 'loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds') + $.fromNowPanel('Last Compact Tables Operation Success', 'loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds') ) 
.addPanel( - $.panel('Compact and Mark Operations Duration') + + $.panel('Compact Tables Operations Duration') + $.queryPanel(['loki_boltdb_shipper_compact_tables_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']) + { yaxes: $.yaxes('s') }, ) + ) + .addRow( + $.row('') .addPanel( - $.panel('Compact and Mark Operations Per Status') + + $.panel('Number of times Tables were skipped during Compaction') + + $.queryPanel(['sum(increase(loki_compactor_skipped_compacting_locked_table_total{%s}[$__range]))' % $.namespaceMatcher()], ['{{table_name}}']), + ) + .addPanel( + $.panel('Compact Tables Operations Per Status') + $.queryPanel(['sum by (status)(rate(loki_boltdb_shipper_compact_tables_operation_total{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{success}}']), ) ) + .addRow( + $.row('Retention') + .addPanel( + $.fromNowPanel('Last Mark Operation Success', 'loki_compactor_apply_retention_last_successful_run_timestamp_seconds') + ) + .addPanel( + $.panel('Mark Operations Duration') + + $.queryPanel(['loki_compactor_apply_retention_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']) + + { yaxes: $.yaxes('s') }, + ) + .addPanel( + $.panel('Mark Operations Per Status') + + $.queryPanel(['sum by (status)(rate(loki_compactor_apply_retention_operation_total{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{success}}']), + ) + ) .addRow( $.row('Per Table Marker') .addPanel( From 8d34f857bcb41120fca71948f9b7dc3504047228 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Mon, 20 Nov 2023 17:15:06 +0100 Subject: [PATCH 20/48] Propagate trace ID with HTTP gRPC request. (#11251) The changes in https://github.com/grafana/loki/pull/10688 did not propage the trace ID from the context. `Frontend.RoundTripGRPC` would inject the trace ID into the request. That's not done in `Frontend.Do`. This changes extends the `codec.EncodeRequest` to inject the trace ID there. This is more inline with other metadata. 
--- pkg/loki/modules.go | 1 + pkg/querier/queryrange/codec.go | 9 +++++++++ pkg/querier/queryrange/instrument.go | 16 ++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 4c14a4872655f..e0e8ab4d1f88d 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -519,6 +519,7 @@ func (t *Loki) initQuerier() (services.Service, error) { internalMiddlewares := []queryrangebase.Middleware{ serverutil.RecoveryMiddleware, queryrange.Instrument{Metrics: t.Metrics}, + queryrange.Tracer{}, } if t.supportIndexDeleteRequest() && t.Cfg.CompactorConfig.RetentionEnabled { internalMiddlewares = append( diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 0cf3c06c22639..2167a5134b602 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -579,6 +579,15 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht } header.Set(user.OrgIDHeaderName, orgID) + // Propagate trace context in request. 
+ tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) + if tracer != nil && span != nil { + carrier := opentracing.HTTPHeadersCarrier(header) + if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil { + return nil, err + } + } + switch request := r.(type) { case *LokiRequest: params := url.Values{ diff --git a/pkg/querier/queryrange/instrument.go b/pkg/querier/queryrange/instrument.go index 8c32fad4ca304..497cfb2dd8a1a 100644 --- a/pkg/querier/queryrange/instrument.go +++ b/pkg/querier/queryrange/instrument.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/instrument" "github.com/grafana/dskit/middleware" + "github.com/opentracing/opentracing-go" "github.com/grafana/dskit/server" @@ -52,3 +53,18 @@ func (i Instrument) observe(ctx context.Context, route string, err error, durati } instrument.ObserveWithExemplar(ctx, i.RequestDuration.WithLabelValues(method, route, respStatus, "false"), duration.Seconds()) } + +type Tracer struct{} + +var _ queryrangebase.Middleware = Tracer{} + +// Wrap implements the queryrangebase.Middleware +func (t Tracer) Wrap(next queryrangebase.Handler) queryrangebase.Handler { + return queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + route := DefaultCodec.Path(r) + route = middleware.MakeLabelValue(route) + span, ctx := opentracing.StartSpanFromContext(ctx, route) + defer span.Finish() + return next.Do(ctx, r) + }) +} From 5f4a719bb62499d1e688ad0d0800050e416b9fa3 Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Mon, 20 Nov 2023 16:46:15 -0500 Subject: [PATCH 21/48] tokenizer v1 cleanup (#11272) **What this PR does / why we need it**: Removes all usage of v1 tokenizers, renames v2 to v1 since we never released this in a production way. 
**Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --- pkg/storage/bloom/v1/bloom_tokenizer.go | 87 +-- pkg/storage/bloom/v1/bloom_tokenizer_test.go | 100 +-- pkg/storage/bloom/v1/tokenizer.go | 175 +----- pkg/storage/bloom/v1/tokenizer_test.go | 621 ++----------------- tools/tsdb/bloom-tester/lib.go | 20 +- tools/tsdb/bloom-tester/lib_test.go | 262 ++------ tools/tsdb/bloom-tester/metrics.go | 4 +- tools/tsdb/bloom-tester/readlib.go | 36 +- tools/tsdb/bloom-tester/readlib_test.go | 15 +- 9 files changed, 183 insertions(+), 1137 deletions(-) diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index 26ebd63006383..c5dd5e514507f 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -2,6 +2,7 @@ package v1 import ( "context" + "encoding/binary" "math" "time" @@ -27,9 +28,8 @@ Bloom filters are utilized for faster lookups of log lines. 
type BloomTokenizer struct { metrics *metrics - lineTokenizer Tokenizer - chunkIDTokenizer *WrappedTokenizer - cache map[string]interface{} + lineTokenizer *NGramTokenizer + cache map[string]interface{} } const CacheSize = 150000 @@ -46,17 +46,15 @@ func NewBloomTokenizer(reg prometheus.Registerer) (*BloomTokenizer, error) { metrics: newMetrics(reg), } t.cache = make(map[string]interface{}, CacheSize) - t.lineTokenizer = NewNGramTokenizer(DefaultNGramLength, DefaultNGramLength+1, DefaultNGramSkip) // default to 4-grams, no skip - t.chunkIDTokenizer = ChunkIDTokenizer(t.lineTokenizer) + t.lineTokenizer = NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) // default to 4-grams, no skip level.Info(util_log.Logger).Log("bloom tokenizer created") return t, nil } -func (bt *BloomTokenizer) SetLineTokenizer(t Tokenizer) { +func (bt *BloomTokenizer) SetLineTokenizer(t *NGramTokenizer) { bt.lineTokenizer = t - bt.chunkIDTokenizer = ChunkIDTokenizer(bt.lineTokenizer) } // TODO: Something real here with metrics @@ -70,12 +68,27 @@ func clearCache(cache map[string]interface{}) { } } +func calculatePrefix(chk logproto.ChunkRef) []byte { + i64buf := make([]byte, binary.MaxVarintLen64) + i32buf := make([]byte, 4) + prefix := make([]byte, 32) + + binary.PutVarint(i64buf, int64(chk.From)) + prefix = append(prefix, i64buf...) + binary.PutVarint(i64buf, int64(chk.Through)) + prefix = append(prefix, i64buf...) + binary.LittleEndian.PutUint32(i32buf, chk.Checksum) + prefix = append(prefix, i32buf...) + + return prefix +} + // PopulateSeriesWithBloom is intended to be called on the write path, and is used to populate the bloom filter for a given series. 
func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) { clearCache(bt.cache) for idx := range chunks { lc := chunks[idx].Data.(*chunkenc.Facade).LokiChunk() - bt.chunkIDTokenizer.Reinit(chunks[idx].ChunkRef) + prefix := calculatePrefix(chunks[idx].ChunkRef) // TODO: error handling itr, err := lc.Iterator( @@ -93,16 +106,33 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo defer itr.Close() for itr.Next() && itr.Error() == nil { - toks := bt.chunkIDTokenizer.Tokens(itr.Entry().Line) + chunkTokenizer := NewPrefixedTokenIter(prefix, bt.lineTokenizer.Tokens(itr.Entry().Line)) + for chunkTokenizer.Next() { + tok := chunkTokenizer.At() + if tok != nil { + str := string(tok) + _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters + if !found { + bt.cache[str] = nil + + seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok) - for _, tok := range toks { - if tok.Key != nil { - str := string(tok.Key) + if len(bt.cache) >= CacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other + clearCache(bt.cache) + } + } + } + } + lineTokenizer := bt.lineTokenizer.Tokens(itr.Entry().Line) + for lineTokenizer.Next() { + tok := lineTokenizer.At() + if tok != nil { + str := string(tok) _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters if !found { bt.cache[str] = nil - seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok.Key) + seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok) if len(bt.cache) >= CacheSize { // While crude, this has proven efficient in performance testing. 
This speaks to the similarity in log lines near each other clearCache(bt.cache) @@ -110,6 +140,7 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo } } } + } seriesWithBloom.Series.Chunks = append(seriesWithBloom.Series.Chunks, ChunkRef{ Start: chunks[idx].From, @@ -118,33 +149,3 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo }) } // for each chunk } - -// SearchesForTokenizerAndLine is for taking a given search string (ex: on the read/query path) and returning -// all the possible tokens, given a tokenizer. -// This is a multi-dimensional slice where the first slice is the offset into the line, and the -// second slice is the tokens for that offset. If an offset into the line returns no tokens, this first dimension -// will be less than 1 + the number of skips specified in the tokenizer -// The offset is used if the Tokenizer has a skip value being utilized. -func SearchesForTokenizerAndLine(t Tokenizer, line string) (res [][]Token) { - res = make([][]Token, 0, 10) - for i := range line { // iterate by runes - if i >= t.GetSkip()+1 { - break - } - tmpTokens := make([]Token, 0, 100) - tokens := t.Tokens(line[i:]) - // As the way the tokenizer is coded, it will reuse its internal buffers, - // but we need to save the data, hence the need for copying - for _, token := range tokens { - tmpToken := Token{} - tmpToken.Key = make([]byte, len(token.Key)) - copy(tmpToken.Key, token.Key) - tmpTokens = append(tmpTokens, tmpToken) - } - if len(tokens) > 0 { - res = append(res, tmpTokens) - } - } - - return res -} diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index 034301f88c1aa..104524da479f7 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -20,95 +20,21 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +var ( + four = NewNGramTokenizer(4, 0) +) + func TestSetLineTokenizer(t 
*testing.T) { bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) // Validate defaults - require.Equal(t, bt.lineTokenizer.GetMin(), DefaultNGramLength) - require.Equal(t, bt.lineTokenizer.GetMax(), DefaultNGramLength+1) - require.Equal(t, bt.lineTokenizer.GetSkip(), DefaultNGramSkip) - - require.Equal(t, bt.chunkIDTokenizer.GetMin(), DefaultNGramLength) - require.Equal(t, bt.chunkIDTokenizer.GetMax(), DefaultNGramLength+1) - require.Equal(t, bt.chunkIDTokenizer.GetSkip(), DefaultNGramSkip) + require.Equal(t, bt.lineTokenizer.N, DefaultNGramLength) + require.Equal(t, bt.lineTokenizer.Skip, DefaultNGramSkip) // Set new tokenizer, and validate against that - bt.SetLineTokenizer(NewNGramTokenizer(6, 7, 2)) - require.Equal(t, bt.lineTokenizer.GetMin(), 6) - require.Equal(t, bt.lineTokenizer.GetMax(), 7) - require.Equal(t, bt.lineTokenizer.GetSkip(), 2) - - require.Equal(t, bt.chunkIDTokenizer.GetMin(), 6) - require.Equal(t, bt.chunkIDTokenizer.GetMax(), 7) - require.Equal(t, bt.chunkIDTokenizer.GetSkip(), 2) -} - -func TestSearchesForTokenizerAndLine(t *testing.T) { - for _, tc := range []struct { - desc string - input string - t Tokenizer - exp [][]Token - }{ - { - desc: "empty", - input: "", - t: four, - exp: [][]Token{}, - }, - { - desc: "single char", - input: "a", - t: four, - exp: [][]Token{}, - }, - { - desc: "four chars", - input: "abcd", - t: four, - exp: [][]Token{ - {{Key: []byte("abcd")}}}, - }, - { - desc: "uuid partial", - input: "2b1a5e46-36a2-4", - t: four, - exp: [][]Token{{ - {Key: []byte("2b1a")}, - {Key: []byte("b1a5")}, - {Key: []byte("1a5e")}, - {Key: []byte("a5e4")}, - {Key: []byte("5e46")}, - {Key: []byte("e46-")}, - {Key: []byte("46-3")}, - {Key: []byte("6-36")}, - {Key: []byte("-36a")}, - {Key: []byte("36a2")}, - {Key: []byte("6a2-")}, - {Key: []byte("a2-4")}}, - }, - }, - { - desc: "short special chars", - t: four, - input: "日本語", - exp: [][]Token{}, - }, - { - desc: "longer special chars", - t: four, - input: "日本語日本語", - exp: 
[][]Token{{ - {Key: []byte("日本語日")}, - {Key: []byte("本語日本")}, - {Key: []byte("語日本語")}}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, SearchesForTokenizerAndLine(tc.t, tc.input)) - }) - } - + bt.SetLineTokenizer(NewNGramTokenizer(6, 7)) + require.Equal(t, bt.lineTokenizer.N, 6) + require.Equal(t, bt.lineTokenizer.Skip, 7) } func TestPopulateSeriesWithBloom(t *testing.T) { @@ -149,9 +75,11 @@ func TestPopulateSeriesWithBloom(t *testing.T) { } bt.PopulateSeriesWithBloom(&swb, chunks) - tokens := SearchesForTokenizerAndLine(four, testLine) - for _, token := range tokens[0] { - require.True(t, swb.Bloom.Test(token.Key)) + tokenizer := NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) + itr := tokenizer.Tokens(testLine) + for itr.Next() { + token := itr.At() + require.True(t, swb.Bloom.Test(token)) } } diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go index e27fa04e312f5..4582317809449 100644 --- a/pkg/storage/bloom/v1/tokenizer.go +++ b/pkg/storage/bloom/v1/tokenizer.go @@ -1,100 +1,9 @@ package v1 import ( - "encoding/binary" "unicode/utf8" - - "github.com/grafana/loki/pkg/logproto" ) -type Token struct { - Key []byte -} - -type Tokenizer interface { - Tokens(line string) []Token - GetSkip() int - GetMin() int - GetMax() int -} - -const TokenBufferSize = 4096 -const TokenKeySize = 132 - -type NgramTokenizer struct { - // [min,max) exclusivity - min, max, skip int - buffers [][]rune // circular buffers used for ngram generation - runeBuffer []byte // buffer used for token generation - internalTokenBuffer []Token // circular buffer for tokens -} - -/* -N-Grams (https://en.wikipedia.org/wiki/N-gram) are a series of 'n' adjacent characters in a string. -These will be utilized for the bloom filters to allow for fuzzy searching. 
-*/ -func NewNGramTokenizer(min, max, skip int) *NgramTokenizer { - capacity := max - min - t := &NgramTokenizer{ - min: min, - max: max, - skip: skip, - buffers: make([][]rune, capacity), - runeBuffer: make([]byte, 0, max*4), - internalTokenBuffer: make([]Token, 0, TokenBufferSize), - } - - for i := range t.buffers { - t.buffers[i] = make([]rune, t.min+i) - } - - for i := 0; i < cap(t.internalTokenBuffer); i++ { - t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)}) - } - - return t -} - -func (t *NgramTokenizer) GetSkip() int { - return t.skip -} - -func (t *NgramTokenizer) GetMin() int { - return t.min -} - -func (t *NgramTokenizer) GetMax() int { - return t.max -} - -func (t *NgramTokenizer) Tokens(line string) []Token { - var i int // rune index (not position that is measured in the range loop) - numToks := 0 - for _, r := range line { - - // j is the index of the buffer to use - for j := 0; j < (t.max - t.min); j++ { - // n is the length of the ngram - n := j + t.min - // pos is the position in the buffer to overwrite - pos := i % n - t.buffers[j][pos] = r - - if i >= n-1 && (i+1-n)%(t.skip+1) == 0 { - t.runeBuffer = reassemble(t.buffers[j], len(t.buffers[j]), (i+1)%n, t.runeBuffer) - if numToks >= cap(t.internalTokenBuffer) || numToks == len(t.internalTokenBuffer) { - t.internalTokenBuffer = append(t.internalTokenBuffer, Token{Key: make([]byte, 0, TokenKeySize)}) - } - t.internalTokenBuffer[numToks].Key = t.internalTokenBuffer[numToks].Key[:0] - t.internalTokenBuffer[numToks].Key = append(t.internalTokenBuffer[numToks].Key, t.runeBuffer...) 
- numToks++ - } - } - i++ - } - return t.internalTokenBuffer[0:numToks] -} - func reassemble(buf []rune, ln, pos int, result []byte) []byte { result = result[:0] // Reset the result slice for i := 0; i < ln; i++ { @@ -104,75 +13,9 @@ func reassemble(buf []rune, ln, pos int, result []byte) []byte { return result } -func chunkIDTransformer(tok Token, prefix []byte) Token { - tok.Key = append(append(tok.Key, prefix...), tok.Key...)[len(tok.Key):] - return tok -} - -type WrappedTokenizer struct { - t Tokenizer - tokenBuffer []Token - prefix []byte - i64buf []byte - i32buf []byte -} - -func (w *WrappedTokenizer) Tokens(line string) []Token { - w.tokenBuffer = w.tokenBuffer[:0] // Reset the result slice - toks := w.t.Tokens(line) - for _, tok := range toks { - w.tokenBuffer = append(w.tokenBuffer, chunkIDTransformer(tok, w.prefix), tok) - } - - return w.tokenBuffer -} - -func (w *WrappedTokenizer) GetSkip() int { - return w.t.GetSkip() -} - -func (w *WrappedTokenizer) GetMin() int { - return w.t.GetMin() -} - -func (w *WrappedTokenizer) GetMax() int { - return w.t.GetMax() -} - -func ChunkIDTokenizer(t Tokenizer) *WrappedTokenizer { - p := make([]byte, 0, 256) - return &WrappedTokenizer{ - t: t, - tokenBuffer: make([]Token, 0, TokenBufferSize), - prefix: p, - i64buf: make([]byte, binary.MaxVarintLen64), - i32buf: make([]byte, 4), - } -} - -func zeroBuffer(buf []byte) { - for i := range buf { - buf[i] = 0 - } -} - -func (w *WrappedTokenizer) Reinit(chk logproto.ChunkRef) { - w.prefix = w.prefix[:0] - zeroBuffer(w.i64buf) - zeroBuffer(w.i32buf) - - binary.PutVarint(w.i64buf, int64(chk.From)) - w.prefix = append(w.prefix, w.i64buf...) - binary.PutVarint(w.i64buf, int64(chk.Through)) - w.prefix = append(w.prefix, w.i64buf...) - binary.LittleEndian.PutUint32(w.i32buf, chk.Checksum) - w.prefix = append(w.prefix, w.i32buf...) 
-} - // Iterable variants (more performant, less space) - -type NGramTokenizerV2 struct { - n, skip int +type NGramTokenizer struct { + N, Skip int buffer []rune // circular buffer used for ngram generation res []byte // buffer used for token generation } @@ -181,10 +24,10 @@ type NGramTokenizerV2 struct { N-Grams (https://en.wikipedia.org/wiki/N-gram) are a series of 'n' adjacent characters in a string. These will be utilized for the bloom filters to allow for fuzzy searching. */ -func NewNGramTokenizerV2(n, skip int) *NGramTokenizerV2 { - t := &NGramTokenizerV2{ - n: n, - skip: skip, +func NewNGramTokenizer(n, skip int) *NGramTokenizer { + t := &NGramTokenizer{ + N: n, + Skip: skip, buffer: make([]rune, n+skip), res: make([]byte, 0, n*4), // maximum 4 bytes per rune } @@ -194,10 +37,10 @@ func NewNGramTokenizerV2(n, skip int) *NGramTokenizerV2 { // The Token iterator uses shared buffers for performance. The []byte returned by At() // is not safe for use after subsequent calls to Next() -func (t *NGramTokenizerV2) Tokens(line string) NGramTokenIter { +func (t *NGramTokenizer) Tokens(line string) NGramTokenIter { return NGramTokenIter{ - n: t.n, - skip: t.skip, + n: t.N, + skip: t.Skip, line: line, diff --git a/pkg/storage/bloom/v1/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go index a0becd464646a..3532c28a4f603 100644 --- a/pkg/storage/bloom/v1/tokenizer_test.go +++ b/pkg/storage/bloom/v1/tokenizer_test.go @@ -1,43 +1,36 @@ package v1 import ( - "bufio" - "encoding/binary" - "os" "testing" - "github.com/grafana/loki/pkg/logproto" - "github.com/stretchr/testify/require" ) const BigFile = "../../../logql/sketch/testdata/war_peace.txt" -var ( - twoSkipOne = NewNGramTokenizer(2, 3, 1) - three = NewNGramTokenizer(3, 4, 0) - threeSkip1 = NewNGramTokenizer(3, 4, 1) - threeSkip2 = NewNGramTokenizer(3, 4, 2) - four = NewNGramTokenizer(4, 5, 0) - fourSkip1 = NewNGramTokenizer(4, 5, 1) - fourSkip2 = NewNGramTokenizer(4, 5, 2) - five = NewNGramTokenizer(5, 6, 0) - 
six = NewNGramTokenizer(6, 7, 0) -) - func TestNGramIterator(t *testing.T) { var ( - three = NewNGramTokenizerV2(3, 0) - threeSkip1 = NewNGramTokenizerV2(3, 1) - threeSkip3 = NewNGramTokenizerV2(3, 3) + three = NewNGramTokenizer(3, 0) + threeSkip1 = NewNGramTokenizer(3, 1) + threeSkip3 = NewNGramTokenizer(3, 3) ) for _, tc := range []struct { desc string - t *NGramTokenizerV2 + t *NGramTokenizer input string exp []string }{ + { + t: three, + input: "", + exp: []string{}, + }, + { + t: three, + input: "ab", + exp: []string{}, + }, { t: three, input: "abcdefg", @@ -53,6 +46,19 @@ func TestNGramIterator(t *testing.T) { input: "abcdefgh", exp: []string{"abc", "efg"}, }, + { + t: three, + input: "日本語", + exp: []string{"日本語"}, + }, + { + t: four, + input: "日本語日本語", + exp: []string{ + "日本語日", + "本語日本", + "語日本語"}, + }, } { t.Run(tc.desc, func(t *testing.T) { itr := tc.t.Tokens(tc.input) @@ -65,518 +71,42 @@ func TestNGramIterator(t *testing.T) { } } -func TestNGrams(t *testing.T) { - tokenizer := NewNGramTokenizer(2, 4, 0) - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "two chars", - input: "ab", - exp: []Token{{Key: []byte("ab")}}, - }, - { - desc: "three chars", - input: "abc", - exp: []Token{{Key: []byte("ab")}, {Key: []byte("bc")}, {Key: []byte("abc")}}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("ab")}, {Key: []byte("bc")}, {Key: []byte("abc")}, {Key: []byte("cd")}, {Key: []byte("bcd")}}, - }, - { - desc: "foo", - input: "日本語", - exp: []Token{{Key: []byte("日本")}, {Key: []byte("本語")}, {Key: []byte("日本語")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func TestNGramsSkip(t *testing.T) { - - for _, tc := range []struct { - desc string - tokenizer *NgramTokenizer - input string - exp []Token - }{ - { - 
desc: "four chars", - tokenizer: twoSkipOne, - input: "abcd", - exp: []Token{{Key: []byte("ab")}, {Key: []byte("cd")}}, - }, - { - desc: "special chars", - tokenizer: twoSkipOne, - input: "日本語", - exp: []Token{{Key: []byte("日本")}}, - }, - { - desc: "multi", - tokenizer: NewNGramTokenizer(2, 4, 1), - input: "abcdefghij", - exp: []Token{ - {Key: []byte("ab")}, - {Key: []byte("abc")}, - {Key: []byte("cd")}, - {Key: []byte("cde")}, - {Key: []byte("ef")}, - {Key: []byte("efg")}, - {Key: []byte("gh")}, - {Key: []byte("ghi")}, - {Key: []byte("ij")}, - }, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tc.tokenizer.Tokens(tc.input)) - }) - } -} - -func Test3GramSkip0Tokenizer(t *testing.T) { - tokenizer := three - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{{Key: []byte("abc")}}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abc")}, {Key: []byte("bcd")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test3GramSkip1Tokenizer(t *testing.T) { - tokenizer := threeSkip1 - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{{Key: []byte("abc")}}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abc")}}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{{Key: []byte("abc")}, {Key: []byte("cde")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test3GramSkip2Tokenizer(t *testing.T) { - tokenizer := threeSkip2 - for _, tc 
:= range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abc")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test4GramSkip0Tokenizer(t *testing.T) { - tokenizer := four - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abcd")}}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("bcde")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test4GramSkip1Tokenizer(t *testing.T) { - tokenizer := fourSkip1 - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abcd")}}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{{Key: []byte("abcd")}}, - }, - { - desc: "six chars", - input: "abcdef", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}}, - }, - { - desc: "seven chars", - input: "abcdefg", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}}, - }, - { - desc: "eight chars", - input: "abcdefgh", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("cdef")}, {Key: []byte("efgh")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, 
tokenizer.Tokens(tc.input)) - }) - } -} - -func Test4GramSkip2Tokenizer(t *testing.T) { - tokenizer := fourSkip2 - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{{Key: []byte("abcd")}}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{{Key: []byte("abcd")}}, - }, - { - desc: "six chars", - input: "abcdef", - exp: []Token{{Key: []byte("abcd")}}, - }, - { - desc: "seven chars", - input: "abcdefg", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}}, - }, - { - desc: "eight chars", - input: "abcdefgh", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}}, - }, - { - desc: "nine chars", - input: "abcdefghi", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}}, - }, - { - desc: "ten chars", - input: "abcdefghij", - exp: []Token{{Key: []byte("abcd")}, {Key: []byte("defg")}, {Key: []byte("ghij")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func Test5GramSkip0Tokenizer(t *testing.T) { - tokenizer := five - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{}, - }, - { - desc: "five chars", - input: "abcde", - exp: []Token{{Key: []byte("abcde")}}, - }, - { - desc: "six chars", - input: "abcdef", - exp: []Token{{Key: []byte("abcde")}, {Key: []byte("bcdef")}}, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} +func TestPrefixedIterator(t *testing.T) { + var ( 
+ three = NewNGramTokenizer(3, 0) + ) -func Test6GramSkip0Tokenizer(t *testing.T) { - tokenizer := six for _, tc := range []struct { desc string input string - exp []Token + exp []string }{ { - desc: "empty", input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "three char", - input: "abc", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{}, + exp: []string{}, }, { - desc: "five chars", - input: "abcde", - exp: []Token{}, - }, - { - desc: "six chars", - input: "abcdef", - exp: []Token{{Key: []byte("abcdef")}}, + input: "ab", + exp: []string{}, }, { - desc: "seven chars", input: "abcdefg", - exp: []Token{{Key: []byte("abcdef")}, {Key: []byte("bcdefg")}}, + exp: []string{"0123abc", "0123bcd", "0123cde", "0123def", "0123efg"}, }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, tokenizer.Tokens(tc.input)) - }) - } -} - -func makeBuf(from, through, checksum int) []byte { - p := make([]byte, 0, 256) - i64buf := make([]byte, binary.MaxVarintLen64) - i32buf := make([]byte, 4) - binary.PutVarint(i64buf, int64(from)) - p = append(p, i64buf...) - binary.PutVarint(i64buf, int64(through)) - p = append(p, i64buf...) - binary.LittleEndian.PutUint32(i32buf, uint32(checksum)) - p = append(p, i32buf...) 
- return p -} - -func TestWrappedTokenizer(t *testing.T) { - tokenizer := threeSkip2 - for _, tc := range []struct { - desc string - input string - exp []Token - }{ - { - desc: "empty", - input: "", - exp: []Token{}, - }, - { - desc: "single char", - input: "a", - exp: []Token{}, - }, - { - desc: "four chars", - input: "abcd", - exp: []Token{ - {Key: append(makeBuf(0, 999999, 1), []byte("abc")...)}, - {Key: []byte("abc")}}, - }, { - desc: "uuid", - input: "2b1a5e46-36a2-4694-a4b1-f34cc7bdfc45", - exp: []Token{ - {Key: append(makeBuf(0, 999999, 1), []byte("2b1")...)}, - {Key: []byte("2b1")}, - {Key: append(makeBuf(0, 999999, 1), []byte("a5e")...)}, - {Key: []byte("a5e")}, - {Key: append(makeBuf(0, 999999, 1), []byte("46-")...)}, - {Key: []byte("46-")}, - {Key: append(makeBuf(0, 999999, 1), []byte("36a")...)}, - {Key: []byte("36a")}, - {Key: append(makeBuf(0, 999999, 1), []byte("2-4")...)}, - {Key: []byte("2-4")}, - {Key: append(makeBuf(0, 999999, 1), []byte("694")...)}, - {Key: []byte("694")}, - {Key: append(makeBuf(0, 999999, 1), []byte("-a4")...)}, - {Key: []byte("-a4")}, - {Key: append(makeBuf(0, 999999, 1), []byte("b1-")...)}, - {Key: []byte("b1-")}, - {Key: append(makeBuf(0, 999999, 1), []byte("f34")...)}, - {Key: []byte("f34")}, - {Key: append(makeBuf(0, 999999, 1), []byte("cc7")...)}, - {Key: []byte("cc7")}, - {Key: append(makeBuf(0, 999999, 1), []byte("bdf")...)}, - {Key: []byte("bdf")}, - {Key: append(makeBuf(0, 999999, 1), []byte("c45")...)}, - {Key: []byte("c45")}, - }, + input: "日本語", + exp: []string{"0123日本語"}, }, } { + prefix := []byte("0123") t.Run(tc.desc, func(t *testing.T) { - chunkTokenizer := ChunkIDTokenizer(tokenizer) - chunkTokenizer.Reinit(logproto.ChunkRef{From: 0, Through: 999999, Checksum: 1}) - require.Equal(t, tc.exp, chunkTokenizer.Tokens(tc.input)) + itr := NewPrefixedTokenIter(prefix, three.Tokens(tc.input)) + for _, exp := range tc.exp { + require.True(t, itr.Next()) + require.Equal(t, exp, string(itr.At())) + } + require.False(t, 
itr.Next()) }) } } @@ -594,8 +124,8 @@ sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit a func BenchmarkTokens(b *testing.B) { var ( - v2Three = NewNGramTokenizerV2(3, 0) - v2ThreeSkip1 = NewNGramTokenizerV2(3, 1) + v2Three = NewNGramTokenizer(3, 0) + v2ThreeSkip1 = NewNGramTokenizer(3, 1) // fp + from + through + checksum chunkPrefixLen = 8 + 8 + 8 + 4 @@ -613,14 +143,6 @@ func BenchmarkTokens(b *testing.B) { { desc: "three", impls: []impl{ - { - desc: "v1", - f: func() { - for _, tok := range three.Tokens(lorem) { - _ = tok - } - }, - }, { desc: "v2", f: func() { @@ -635,14 +157,6 @@ func BenchmarkTokens(b *testing.B) { { desc: "threeSkip1", impls: []impl{ - { - desc: "v1", - f: func() { - for _, tok := range threeSkip1.Tokens(lorem) { - _ = tok - } - }, - }, { desc: "v2", f: func() { @@ -657,18 +171,6 @@ func BenchmarkTokens(b *testing.B) { { desc: "threeChunk", impls: []impl{ - { - desc: "v1", - f: func() func() { - chunkTokenizer := ChunkIDTokenizer(three) - chunkTokenizer.Reinit(logproto.ChunkRef{}) - return func() { - for _, tok := range chunkTokenizer.Tokens(lorem) { - _ = tok - } - } - }(), - }, { desc: "v2", f: func() func() { @@ -686,18 +188,6 @@ func BenchmarkTokens(b *testing.B) { { desc: "threeSkip1Chunk", impls: []impl{ - { - desc: "v1", - f: func() func() { - chunkTokenizer := ChunkIDTokenizer(threeSkip1) - chunkTokenizer.Reinit(logproto.ChunkRef{}) - return func() { - for _, tok := range chunkTokenizer.Tokens(lorem) { - _ = tok - } - } - }(), - }, { desc: "v2", f: func() func() { @@ -724,20 +214,3 @@ func BenchmarkTokens(b *testing.B) { }) } } - -func BenchmarkWrappedTokens(b *testing.B) { - chunkTokenizer := ChunkIDTokenizer(three) - chunkTokenizer.Reinit(logproto.ChunkRef{From: 0, Through: 999999, Checksum: 1}) - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - _ = 
chunkTokenizer.Tokens(line) - } - } -} diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go index 7eefb56342c40..36926bcd30343 100644 --- a/tools/tsdb/bloom-tester/lib.go +++ b/tools/tsdb/bloom-tester/lib.go @@ -89,18 +89,10 @@ func execute() { } var ( - three = bt.NewNGramTokenizer(3, 4, 0) - threeSkip1 = bt.NewNGramTokenizer(3, 4, 1) - threeSkip2 = bt.NewNGramTokenizer(3, 4, 2) - threeSkip3 = bt.NewNGramTokenizer(3, 4, 3) - four = bt.NewNGramTokenizer(4, 5, 0) - fourSkip1 = bt.NewNGramTokenizer(4, 5, 1) - fourSkip2 = bt.NewNGramTokenizer(4, 5, 2) - five = bt.NewNGramTokenizer(5, 6, 0) - six = bt.NewNGramTokenizer(6, 7, 0) - - onePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.01, 0.8) } - fivePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.05, 0.8) } + three = bt.NewNGramTokenizer(3, 0) + four = bt.NewNGramTokenizer(4, 0) + + onePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.01, 0.8) } ) var experiments = []Experiment{ @@ -116,7 +108,7 @@ var experiments = []Experiment{ */ NewExperiment( "token=4skip0_error=1%_indexchunks=true", - four, + *four, true, onePctError, ), @@ -344,7 +336,7 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS tenant, ls.String(), objectClient) { - bloomTokenizer.SetLineTokenizer(experiment.tokenizer) + bloomTokenizer.SetLineTokenizer(&experiment.tokenizer) level.Info(util_log.Logger).Log("Starting work on: ", ls.String(), "'", FNV32a(ls.String()), "'", experiment.name, tenant) startTime := time.Now().UnixMilli() diff --git a/tools/tsdb/bloom-tester/lib_test.go b/tools/tsdb/bloom-tester/lib_test.go index 419ff44f59007..3269592f4abcb 100644 --- a/tools/tsdb/bloom-tester/lib_test.go +++ b/tools/tsdb/bloom-tester/lib_test.go @@ -16,7 +16,7 @@ func BenchmarkSBFTestAndAdd(b *testing.B) { scanner := bufio.NewScanner(file) experiment := NewExperiment( 
"token=3skip0_error=1%_indexchunks=true", - three, + *three, true, onePctError, ) @@ -25,8 +25,10 @@ for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - sbf.TestAndAdd(token.Key) + + for tokens.Next() { + tok := tokens.At() + sbf.TestAndAdd(tok) } } } @@ -40,7 +42,7 @@ func BenchmarkSBFAdd(b *testing.B) { scanner := bufio.NewScanner(file) experiment := NewExperiment( "token=3skip0_error=1%_indexchunks=true", - three, + *three, true, onePctError, ) @@ -49,8 +51,10 @@ for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - sbf.Add(token.Key) + + for tokens.Next() { + tok := tokens.At() + sbf.Add(tok) } } } @@ -64,7 +68,7 @@ func BenchmarkSBFSeparateTestAndAdd(b *testing.B) { scanner := bufio.NewScanner(file) experiment := NewExperiment( "token=3skip0_error=1%_indexchunks=true", - three, + *three, true, onePctError, ) @@ -73,45 +77,16 @@ for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - found := sbf.Test(token.Key) - if !found { - sbf.Add(token.Key) - } - } - } - } -} -func BenchmarkSBFTestAndAddWithLRU(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := NewLRUCache4(150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - if !cache.Get(token.Key) { - cache.Put(token.Key) - sbf.TestAndAdd(token.Key) - } + for tokens.Next() { + tok := tokens.At() + if !sbf.Test(tok) { sbf.Add(tok) } } } } } -func
BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) { +func BenchmarkSBFTestAndAddWithLRU(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() file, _ := os.Open(BigFile) @@ -119,7 +94,7 @@ func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) { scanner := bufio.NewScanner(file) experiment := NewExperiment( "token=3skip0_error=1%_indexchunks=true", - three, + *three, true, onePctError, ) @@ -129,151 +104,20 @@ func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) { for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - if !cache.Get(token.Key) { - cache.Put(token.Key) - - found := sbf.Test(token.Key) - if !found { - sbf.Add(token.Key) - } - //sbf.TestAndAdd(token.Key) - } - } - } - } -} - -func BenchmarkSBFSeparateTestAndAddWithLRU5(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := NewLRUCache5(150000) - - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - str := string(token.Key) - if !cache.Get(str) { - cache.Put(str) - - found := sbf.Test(token.Key) - if !found { - sbf.Add(token.Key) - } - } - } - } - } -} - -func BenchmarkSBFTestAndAddWithLRU5(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := NewLRUCache5(150000) - - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - str := string(token.Key) - if !cache.Get(str) { - 
cache.Put(str) - - sbf.TestAndAdd(token.Key) - } - } - } - } -} - -func BenchmarkSBFTestAndAddWithByteKeyLRU(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=4skip0_error=1%_indexchunks=false", - four, - false, - onePctError, - ) - sbf := experiment.bloom() - cache := NewByteKeyLRUCache(150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - - array := NewFourByteKeyFromSlice(token.Key) - if !cache.Get(array) { - cache.Put(array) - sbf.TestAndAdd(token.Key) - } - } - } - } -} - -func BenchmarkSBFTestAndAddWithFourByteKeyLRU(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=4skip0_error=1%_indexchunks=false", - four, - false, - onePctError, - ) - sbf := experiment.bloom() - cache := NewFourByteKeyLRUCache(150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - if !cache.Get([4]byte(token.Key)) { - cache.Put([4]byte(token.Key)) - found := sbf.Test(token.Key) - if !found { - sbf.Add(token.Key) - } - //sbf.TestAndAdd(token.Key) + for tokens.Next() { + tok := tokens.At() + if !cache.Get(tok) { + cache.Put(tok) + sbf.TestAndAdd(tok) } - + sbf.TestAndAdd(tok) } } } } -func BenchmarkSBFAddWithLRU(b *testing.B) { +func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() file, _ := os.Open(BigFile) @@ -281,7 +125,7 @@ func BenchmarkSBFAddWithLRU(b *testing.B) { scanner := bufio.NewScanner(file) experiment := NewExperiment( "token=3skip0_error=1%_indexchunks=true", - three, + *three, true, onePctError, ) @@ -291,44 +135,16 @@ func BenchmarkSBFAddWithLRU(b *testing.B) { 
for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - if !cache.Get(token.Key) { - cache.Put(token.Key) - sbf.Add(token.Key) - } - } - } - } -} - -func BenchmarkSBFSeparateTestAndAddWithLRU1(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := NewLRUCache(150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - str := string(token.Key) - if !cache.Get(str) { - cache.Put(str) - found := sbf.Test(token.Key) + for tokens.Next() { + tok := tokens.At() + if !cache.Get(tok) { + cache.Put(tok) + found := sbf.Test(tok) if !found { - sbf.Add(token.Key) + sbf.Add(tok) } - //sbf.Add(token.Key) } + sbf.TestAndAdd(tok) } } } @@ -342,7 +158,7 @@ func BenchmarkSBFSeparateTestAndAddWithMap(b *testing.B) { scanner := bufio.NewScanner(file) experiment := NewExperiment( "token=3skip0_error=1%_indexchunks=true", - three, + *three, true, onePctError, ) @@ -352,15 +168,15 @@ func BenchmarkSBFSeparateTestAndAddWithMap(b *testing.B) { for scanner.Scan() { line := scanner.Text() tokens := experiment.tokenizer.Tokens(line) - for _, token := range tokens { - str := string(token.Key) - - _, found := cache[str] + for tokens.Next() { + tok := tokens.At() + tokStr := string(tok) + _, found := cache[tokStr] if !found { - cache[str] = "" - f := sbf.Test(token.Key) + cache[tokStr] = "" + f := sbf.Test(tok) if !f { - sbf.Add(token.Key) + sbf.Add(tok) } if len(cache) > 150000 { diff --git a/tools/tsdb/bloom-tester/metrics.go b/tools/tsdb/bloom-tester/metrics.go index 193f829063db8..2805901a3b9c3 100644 --- a/tools/tsdb/bloom-tester/metrics.go +++ b/tools/tsdb/bloom-tester/metrics.go @@ -10,12 
+10,12 @@ import ( type Experiment struct { name string - tokenizer bt.Tokenizer + tokenizer bt.NGramTokenizer bloom func() *filter.ScalableBloomFilter encodeChunkID bool } -func NewExperiment(name string, tokenizer bt.Tokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment { +func NewExperiment(name string, tokenizer bt.NGramTokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment { return Experiment{ name: name, tokenizer: tokenizer, diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go index eaca7a38c15bd..93b0ba75b6d15 100644 --- a/tools/tsdb/bloom-tester/readlib.go +++ b/tools/tsdb/bloom-tester/readlib.go @@ -4,7 +4,6 @@ import ( "context" "flag" "fmt" - "github.com/grafana/dskit/services" "github.com/grafana/loki/pkg/chunkenc" @@ -200,10 +199,10 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh tenant, ls.String(), objectClient) - bloomTokenizer.SetLineTokenizer(experiment.tokenizer) + bloomTokenizer.SetLineTokenizer(&experiment.tokenizer) for gotIdx := range got { // for every chunk for _, queryExperiment := range queryExperiments { // for each search string - if len(queryExperiment.searchString) >= experiment.tokenizer.GetMin()+experiment.tokenizer.GetSkip() { + if len(queryExperiment.searchString) >= experiment.tokenizer.N+experiment.tokenizer.Skip { foundInChunk := false foundInSbf := false @@ -245,11 +244,6 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh helpers.ExitErr("iterating chunks ", itr.Error()) } - /*else // if search string is long enough - { - // fmt.Println("Skipping", queryExperiment.name, "because it's too short", experiment.name) - }*/ - } // for each search string } // for every chunk @@ -306,21 +300,21 @@ func readSBFFromObjectStorage(location, prefix, period, tenant, series string, o return sbf } -func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer bt.Tokenizer, searchString 
string) bool { - tokens := bt.SearchesForTokenizerAndLine(tokenizer, searchString) - for _, tokenSet := range tokens { - numMatches := 0 - for _, token := range tokenSet { - if sbf.Test(token.Key) { - numMatches++ - } +func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer bt.NGramTokenizer, searchString string) bool { + itr := tokenizer.Tokens(searchString) + numMatches := 0 + numTokens := 0 + for itr.Next() { + token := itr.At() + numTokens++ + if sbf.Test(token) { + numMatches++ } - if numMatches > 0 { - if numMatches == len(tokenSet) { - return true - } + } + if numMatches > 0 { + if numMatches == numTokens { + return true } - } return false diff --git a/tools/tsdb/bloom-tester/readlib_test.go b/tools/tsdb/bloom-tester/readlib_test.go index 5216918010bc1..edec2c37fe599 100644 --- a/tools/tsdb/bloom-tester/readlib_test.go +++ b/tools/tsdb/bloom-tester/readlib_test.go @@ -1,7 +1,6 @@ package main import ( - bt "github.com/grafana/loki/pkg/storage/bloom/v1" "testing" "github.com/stretchr/testify/require" @@ -10,7 +9,7 @@ import ( func TestSearchSbf(t *testing.T) { experiment := NewExperiment( "token=4skip0_error=1%_indexchunks=true", - four, + *four, true, onePctError, ) @@ -66,13 +65,13 @@ func TestSearchSbf(t *testing.T) { } { t.Run(tc.desc, func(t *testing.T) { sbf := experiment.bloom() - tokens := bt.SearchesForTokenizerAndLine(four, tc.inputLine) - for _, tokenSet := range tokens { - for _, token := range tokenSet { - sbf.Add(token.Key) - } + tokens := four.Tokens(tc.inputLine) + for tokens.Next() { + tok := tokens.At() + sbf.Add(tok) } - require.Equal(t, tc.exp, searchSbf(sbf, four, tc.inputSearch)) + + require.Equal(t, tc.exp, searchSbf(sbf, *four, tc.inputSearch)) }) } } From 49d5761d90a022826684edbdf0e6c91962b98471 Mon Sep 17 00:00:00 2001 From: Bilal Khan <64713734+ibilalkayy@users.noreply.github.com> Date: Tue, 21 Nov 2023 04:27:05 +0500 Subject: [PATCH 22/48] Fixed the grammatical mistake in an _index.md file (#11226) **What this PR does / why we 
need it**: In this PR, I fixed the grammatical mistake in an _index.md file. **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. 
[Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) Signed-off-by: Bilal Khan Co-authored-by: J Stickler --- docs/sources/alert/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/alert/_index.md b/docs/sources/alert/_index.md index 2a0d0826ff254..a6d29cec5ac19 100644 --- a/docs/sources/alert/_index.md +++ b/docs/sources/alert/_index.md @@ -155,7 +155,7 @@ At the time of writing, these are the compatible backends that support this: - [Grafana Mimir](/docs/mimir/latest/operators-guide/reference-http-api/#remote-write) - [Thanos (`Receiver`)](https://thanos.io/tip/components/receive.md/) -Here is an example remote-write configuration for sending to a local Prometheus instance: +Here is an example of a remote-write configuration for sending data to a local Prometheus instance: ```yaml ruler: From 658f4f1f26501b7e5b84e28a8755f4ba287d14ad Mon Sep 17 00:00:00 2001 From: Pavaningithub <58260359+Pavaningithub@users.noreply.github.com> Date: Tue, 21 Nov 2023 04:57:41 +0530 Subject: [PATCH 23/48] Fix Typo in the _index.md (#11242) **What this PR does / why we need it**: Fixing the small typo in the provided example [here](https://grafana.com/docs/loki/latest/setup/install/helm/configure-storage/#configure-storage). This PR just updates the doc. 
**Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- docs/sources/setup/install/helm/configure-storage/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/setup/install/helm/configure-storage/_index.md b/docs/sources/setup/install/helm/configure-storage/_index.md index 105a3fd5397d4..2feafaafeeb8c 100644 --- a/docs/sources/setup/install/helm/configure-storage/_index.md +++ b/docs/sources/setup/install/helm/configure-storage/_index.md @@ -46,7 +46,7 @@ This guide assumes Loki will be installed in one of the modes above and that a ` ``` serviceAccount: annotations: - "eks.amazonaws.com/role-arn": "arn:aws:iam:::role/: + "eks.amazonaws.com/role-arn": "arn:aws:iam:::role/" ``` 3. 
Configure the storage: From 529084935c5a0498f3ff169b2b351ceff7b71d7e Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Tue, 21 Nov 2023 15:36:46 +0530 Subject: [PATCH 24/48] use nanosecond precision for timestamp in compacted boltdb-shipper index file names (#11277) **What this PR does / why we need it**: In PR #9884, we split the compaction and retention loop to run them concurrently. Although we make sure we do not work on the same index from compaction and retention loop, there is a chance that one could run immediately after the other and finish quickly enough to build the index with the same name as the previous one because in `boltdb-shipper` index, we use epoch with `Seconds` precision while building the name of the compacted index file. Since compaction uploads the new file first and then deletes the old file, if the index is built with the same name, we end up uploading the file and deleting it afterwards. This PR fixes the issue by using ns precision for the timestamp in the filenames. **Special notes for your reviewer**: This is not a problem for TSDB since we also add a checksum to the filenames of the index during compaction. --- .../indexshipper/boltdb/compactor/compacted_index.go | 4 ++-- .../indexshipper/boltdb/compactor/table_compactor.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go index d1ea9fcca68ff..584116b240417 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index.go @@ -62,7 +62,7 @@ func (c *CompactedIndex) isEmpty() (bool, error) { // bbolt.Compact fills the whole page by setting FillPercent to 1 which works well here since while copying the data, it receives the index entries in order. 
// The storage space goes down from anywhere between 25% to 50% as per my(Sandeep) tests. func (c *CompactedIndex) recreateCompactedDB() error { - destDB, err := openBoltdbFileWithNoSync(filepath.Join(c.workingDir, fmt.Sprint(time.Now().Unix()))) + destDB, err := openBoltdbFileWithNoSync(filepath.Join(c.workingDir, fmt.Sprint(time.Now().UnixNano()))) if err != nil { return err } @@ -178,7 +178,7 @@ func (c *CompactedIndex) ToIndexFile() (shipperindex.Index, error) { if c.compactedFileRecreated { fileNameFormat = "%s" + recreatedCompactedDBSuffix } - fileName := fmt.Sprintf(fileNameFormat, shipperutil.BuildIndexFileName(c.tableName, uploaderName, fmt.Sprint(time.Now().Unix()))) + fileName := fmt.Sprintf(fileNameFormat, shipperutil.BuildIndexFileName(c.tableName, uploaderName, fmt.Sprint(time.Now().UnixNano()))) idxFile := boltdb.BoltDBToIndexFile(c.compactedFile, fileName) c.compactedFile = nil diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go index 95a02137fcb75..d864d306a2ba7 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go @@ -248,7 +248,7 @@ func (t *tableCompactor) fetchOrCreateUserCompactedIndexSet(userID string) error return err } - compactedFile, err := openBoltdbFileWithNoSync(filepath.Join(userIndexSet.GetWorkingDir(), fmt.Sprint(time.Now().Unix()))) + compactedFile, err := openBoltdbFileWithNoSync(filepath.Join(userIndexSet.GetWorkingDir(), fmt.Sprint(time.Now().UnixNano()))) if err != nil { return err } @@ -272,7 +272,7 @@ func (t *tableCompactor) fetchOrCreateUserCompactedIndexSet(userID string) error func (t *tableCompactor) compactUserIndexes(idxSet compactor.IndexSet) (*CompactedIndex, error) { indexes := idxSet.ListSourceFiles() workingDir := idxSet.GetWorkingDir() - compactedDBName := filepath.Join(workingDir, 
fmt.Sprint(time.Now().Unix())) + compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().UnixNano())) compactedFile, err := openBoltdbFileWithNoSync(compactedDBName) if err != nil { @@ -318,7 +318,7 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn indexes := idxSet.ListSourceFiles() compactedFileIdx := compactedFileIdx(indexes) workingDir := idxSet.GetWorkingDir() - compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().Unix())) + compactedDBName := filepath.Join(workingDir, fmt.Sprint(time.Now().UnixNano())) // if we find a previously compacted file, use it as a seed file to copy other index into it if compactedFileIdx != -1 { From 4455cd9d7d173896969d1d3589b2e9084af393c2 Mon Sep 17 00:00:00 2001 From: Quentin Bisson Date: Tue, 21 Nov 2023 16:24:17 +0100 Subject: [PATCH 25/48] [helm] Fix tracing configuration (#11186) **What this PR does / why we need it**: This PR allows user to enable tracing in the new SSD setup and fixes incorrect documentation because it is currently impossible to enable tracing in this chart (cf. https://github.com/grafana/loki/blob/766f27645d2610a36eaaca8418482b740ae14215/cmd/loki/main.go#L81) **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [x] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. 
[Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --------- Signed-off-by: QuentinBisson --- docs/sources/operations/troubleshooting.md | 6 +++++- docs/sources/setup/install/helm/reference.md | 20 +++++++++++--------- production/helm/loki/CHANGELOG.md | 4 ++++ production/helm/loki/Chart.yaml | 2 +- production/helm/loki/README.md | 2 +- production/helm/loki/values.yaml | 8 ++++++-- 6 files changed, 28 insertions(+), 14 deletions(-) diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md index fd65e9a4d9a97..9fd4e4b8dcf38 100644 --- a/docs/sources/operations/troubleshooting.md +++ b/docs/sources/operations/troubleshooting.md @@ -173,7 +173,11 @@ Jaeger is running. If you deploy with Helm, use the following command: ```bash -$ helm upgrade --install loki loki/loki --set "loki.tracing.jaegerAgentHost=YOUR_JAEGER_AGENT_HOST" +$ helm upgrade --install loki loki/loki --set "loki.tracing.enabled=true" + --set "read.extraEnv[0].name=JAEGER_AGENT_HOST" --set "read.extraEnv[0].value=" + --set "write.extraEnv[0].name=JAEGER_AGENT_HOST" --set "write.extraEnv[0].value=" + --set "backend.extraEnv[0].name=JAEGER_AGENT_HOST" --set "backend.extraEnv[0].value=" + --set "gateway.extraEnv[0].name=JAEGER_AGENT_HOST" --set "gateway.extraEnv[0].value=" ``` ## Running Loki with Istio Sidecars diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 833cc2c77edc8..ede76840c8f6c 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -2297,6 +2297,17 @@ null
 []
 
+ + + + loki.tracing + object + Enable tracing +
+{
+  "enabled": false
+}
+
@@ -4393,15 +4404,6 @@ null
 "1m"
 
- - - - tracing.jaegerAgentHost - string - -
-""
-
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 7f45b3155661c..51dd2deb2be54 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 5.37.0 + +- [FEATURE] Add support for enabling tracing. + ## 5.36.2 - [BUGFIX] Add support to run dnsmasq diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 06768ba93d2d1..39e800d6193e0 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.2 -version: 5.36.3 +version: 5.37.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index b5cd5883819aa..7fc83086785b3 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.36.3](https://img.shields.io/badge/Version-5.36.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) +![Version: 5.37.0](https://img.shields.io/badge/Version-5.37.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/values.yaml 
b/production/helm/loki/values.yaml index de6048aecc712..472882a226c8b 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -240,6 +240,9 @@ loki: distributor: {{- tpl (. | toYaml) $ | nindent 4 }} {{- end }} + + tracing: + enabled: {{ .Values.loki.tracing.enabled }} # Should authentication be enabled auth_enabled: true # -- memberlist configuration (overrides embedded default) @@ -344,6 +347,9 @@ loki: scheduler_address: '{{ include "loki.querySchedulerAddress" . }}' # -- Optional distributor configuration distributor: {} + # -- Enable tracing + tracing: + enabled: false enterprise: # Enable enterprise features, license must be provided enabled: false @@ -1474,8 +1480,6 @@ networkPolicy: podSelector: {} # -- Specifies the namespace the discovery Pods are running in namespaceSelector: {} -tracing: - jaegerAgentHost: "" # ------------------------------------- # Configuration for `minio` child chart # ------------------------------------- From 6e93d150d8d012bb838079d00addb1ed9485b681 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 21 Nov 2023 07:48:51 -0800 Subject: [PATCH 26/48] utilities for reducing ngram allocations in token iterator construction (#11276) Previously, the `calculatePrefix` function was used to generate the token buffer used, but this wouldn't necessarily have enough space to append the ngram afterwards without reallocating+copying the token buffer. This PR ensures we allocate enough space for both prefix and ngram in the token buffer. 
--- pkg/storage/bloom/v1/bloom_tokenizer.go | 37 +++++++++------- pkg/storage/bloom/v1/bloom_tokenizer_test.go | 44 ++++++++++++++++++++ pkg/storage/bloom/v1/tokenizer.go | 16 ++++--- pkg/storage/bloom/v1/tokenizer_test.go | 15 ++++--- 4 files changed, 82 insertions(+), 30 deletions(-) diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index c5dd5e514507f..93830cac8953d 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -2,7 +2,6 @@ package v1 import ( "context" - "encoding/binary" "math" "time" @@ -14,6 +13,7 @@ import ( "github.com/grafana/loki/pkg/logql/log" "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/util/encoding" util_log "github.com/grafana/loki/pkg/util/log" ) @@ -68,27 +68,32 @@ func clearCache(cache map[string]interface{}) { } } -func calculatePrefix(chk logproto.ChunkRef) []byte { - i64buf := make([]byte, binary.MaxVarintLen64) - i32buf := make([]byte, 4) - prefix := make([]byte, 32) - - binary.PutVarint(i64buf, int64(chk.From)) - prefix = append(prefix, i64buf...) - binary.PutVarint(i64buf, int64(chk.Through)) - prefix = append(prefix, i64buf...) - binary.LittleEndian.PutUint32(i32buf, chk.Checksum) - prefix = append(prefix, i32buf...) - - return prefix +// prefixedToken returns a byte slice with sufficient capacity for a chunk-ref prefixed token +// of specific ngram length, along with the length of the prefix. 
+// It ensures enough capacity for the prefix and the token so additional tokens can be created +// without allocations by appending them to the prefix length +func prefixedToken(ngram int, chk logproto.ChunkRef) ([]byte, int) { + var enc encoding.Encbuf + enc.PutBE64(uint64(chk.From)) + enc.PutBE64(uint64(chk.Through)) + enc.PutBE32(chk.Checksum) + prefixLn := enc.Len() // record the length of the prefix + + enc.PutBytes(make([]byte, ngram*MaxRuneLen)) // ensure enough capacity for the ngram + + // return the underlying byte slice and the length of the prefix + return enc.Get(), prefixLn } // PopulateSeriesWithBloom is intended to be called on the write path, and is used to populate the bloom filter for a given series. func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) { clearCache(bt.cache) + + // allocate a reusable key buffer long enough to store both the chunk ref and the ngram + for idx := range chunks { lc := chunks[idx].Data.(*chunkenc.Facade).LokiChunk() - prefix := calculatePrefix(chunks[idx].ChunkRef) + tokenBuf, prefixLn := prefixedToken(bt.lineTokenizer.N, chunks[idx].ChunkRef) // TODO: error handling itr, err := lc.Iterator( @@ -106,7 +111,7 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo defer itr.Close() for itr.Next() && itr.Error() == nil { - chunkTokenizer := NewPrefixedTokenIter(prefix, bt.lineTokenizer.Tokens(itr.Entry().Line)) + chunkTokenizer := NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(itr.Entry().Line)) for chunkTokenizer.Next() { tok := chunkTokenizer.At() if tok != nil { diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index 104524da479f7..050aa61a7e60e 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/pkg/chunkenc" + 
"github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/pkg/storage/chunk" @@ -24,6 +25,49 @@ var ( four = NewNGramTokenizer(4, 0) ) +func TestPrefixedKeyCreation(t *testing.T) { + var ones uint64 = 0xffffffffffffffff + + ref := logproto.ChunkRef{ + From: 0, + Through: model.Time(int64(ones)), + Checksum: 0xffffffff, + } + for _, tc := range []struct { + desc string + ngram, expLen int + }{ + { + desc: "0-gram", + ngram: 0, + expLen: 20, + }, + { + desc: "4-gram", + ngram: 4, + expLen: 20 + 4*MaxRuneLen, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + token, prefixLn := prefixedToken(tc.ngram, ref) + require.Equal(t, 20, prefixLn) + require.Equal(t, tc.expLen, len(token)) + // first 8 bytes should be zeros from `from` + for i := 0; i < 8; i++ { + require.Equal(t, byte(0), token[i]) + } + // next 8 bytes should be ones from `through` + for i := 8; i < 16; i++ { + require.Equal(t, byte(255), token[i]) + } + // next 4 bytes should be ones from `checksum` + for i := 16; i < 20; i++ { + require.Equal(t, byte(255), token[i]) + } + }) + } +} + func TestSetLineTokenizer(t *testing.T) { bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go index 4582317809449..d81840229294c 100644 --- a/pkg/storage/bloom/v1/tokenizer.go +++ b/pkg/storage/bloom/v1/tokenizer.go @@ -4,6 +4,10 @@ import ( "unicode/utf8" ) +const ( + MaxRuneLen = 4 +) + func reassemble(buf []rune, ln, pos int, result []byte) []byte { result = result[:0] // Reset the result slice for i := 0; i < ln; i++ { @@ -29,7 +33,7 @@ func NewNGramTokenizer(n, skip int) *NGramTokenizer { N: n, Skip: skip, buffer: make([]rune, n+skip), - res: make([]byte, 0, n*4), // maximum 4 bytes per rune + res: make([]byte, 0, n*MaxRuneLen), // maximum 4 bytes per rune } return t @@ -89,20 +93,20 @@ func (t *NGramTokenIter) Err() error { } type PrefixedTokenIter struct { - prefix []byte + buf []byte prefixLen 
int NGramTokenIter } func (t *PrefixedTokenIter) At() []byte { - return append(t.prefix[:t.prefixLen], t.NGramTokenIter.At()...) + return append(t.buf[:t.prefixLen], t.NGramTokenIter.At()...) } -func NewPrefixedTokenIter(prefix []byte, iter NGramTokenIter) *PrefixedTokenIter { +func NewPrefixedTokenIter(buf []byte, prefixLn int, iter NGramTokenIter) *PrefixedTokenIter { return &PrefixedTokenIter{ - prefix: prefix, - prefixLen: len(prefix), + buf: buf, + prefixLen: prefixLn, NGramTokenIter: iter, } } diff --git a/pkg/storage/bloom/v1/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go index 3532c28a4f603..471eaea74081b 100644 --- a/pkg/storage/bloom/v1/tokenizer_test.go +++ b/pkg/storage/bloom/v1/tokenizer_test.go @@ -4,6 +4,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logproto" ) const BigFile = "../../../logql/sketch/testdata/war_peace.txt" @@ -101,7 +103,7 @@ func TestPrefixedIterator(t *testing.T) { } { prefix := []byte("0123") t.Run(tc.desc, func(t *testing.T) { - itr := NewPrefixedTokenIter(prefix, three.Tokens(tc.input)) + itr := NewPrefixedTokenIter(prefix, len(prefix), three.Tokens(tc.input)) for _, exp := range tc.exp { require.True(t, itr.Next()) require.Equal(t, exp, string(itr.At())) @@ -126,9 +128,6 @@ func BenchmarkTokens(b *testing.B) { var ( v2Three = NewNGramTokenizer(3, 0) v2ThreeSkip1 = NewNGramTokenizer(3, 1) - - // fp + from + through + checksum - chunkPrefixLen = 8 + 8 + 8 + 4 ) type impl struct { @@ -174,9 +173,9 @@ func BenchmarkTokens(b *testing.B) { { desc: "v2", f: func() func() { - prefix := make([]byte, chunkPrefixLen, 512) + buf, prefixLn := prefixedToken(v2Three.N, logproto.ChunkRef{}) return func() { - itr := NewPrefixedTokenIter(prefix, v2Three.Tokens(lorem)) + itr := NewPrefixedTokenIter(buf, prefixLn, v2Three.Tokens(lorem)) for itr.Next() { _ = itr.At() } @@ -191,9 +190,9 @@ func BenchmarkTokens(b *testing.B) { { desc: "v2", f: func() func() { - prefix := make([]byte, 
chunkPrefixLen, 512) + buf, prefixLn := prefixedToken(v2Three.N, logproto.ChunkRef{}) return func() { - itr := NewPrefixedTokenIter(prefix, v2ThreeSkip1.Tokens(lorem)) + itr := NewPrefixedTokenIter(buf, prefixLn, v2ThreeSkip1.Tokens(lorem)) for itr.Next() { _ = itr.At() } From 162bbb11be9c527d14dd3d01816d0091eb8691fc Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Wed, 22 Nov 2023 11:23:53 +0530 Subject: [PATCH 27/48] do not run retention loop when retention is not enabled (#11280) **What this PR does / why we need it**: Do not run retention loop when retention is not enabled --- pkg/compactor/compactor.go | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 774536152ca9d..07e8389c5b843 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -534,29 +534,29 @@ func (c *Compactor) runCompactions(ctx context.Context) { } }() - c.wg.Add(1) - go func() { - defer c.wg.Done() - if err := c.RunCompaction(ctx, true); err != nil { - level.Error(util_log.Logger).Log("msg", "failed to apply retention", err) - } + if c.cfg.RetentionEnabled { + c.wg.Add(1) + go func() { + defer c.wg.Done() + if err := c.RunCompaction(ctx, true); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to apply retention", err) + } - ticker := time.NewTicker(c.cfg.ApplyRetentionInterval) - defer ticker.Stop() + ticker := time.NewTicker(c.cfg.ApplyRetentionInterval) + defer ticker.Stop() - for { - select { - case <-ticker.C: - if err := c.RunCompaction(ctx, true); err != nil { - level.Error(util_log.Logger).Log("msg", "failed to apply retention", err) + for { + select { + case <-ticker.C: + if err := c.RunCompaction(ctx, true); err != nil { + level.Error(util_log.Logger).Log("msg", "failed to apply retention", err) + } + case <-ctx.Done(): + return } - case <-ctx.Done(): - return } - } - }() + }() - if c.cfg.RetentionEnabled { for _, container := range 
c.storeContainers { c.wg.Add(1) go func(sc storeContainer) { From 5b97fcfd93ae0aa341284b6557697495c445c196 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Wed, 22 Nov 2023 08:29:02 +0100 Subject: [PATCH 28/48] Send query plan to querier. (#11246) **What this PR does / why we need it**: Following https://github.com/grafana/loki/pull/11123 and in order to enable https://github.com/grafana/loki/pull/10417 the query frontend should send the serialized LogQL AST instead of the query string to the queriers. This enables the frontend to change the AST and inject expressions that are not expressible in LogQL. **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [x] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. 
[Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --------- Signed-off-by: Callum Styan Co-authored-by: Callum Styan --- pkg/logcli/client/file.go | 10 +- pkg/logcli/query/query.go | 22 +- pkg/logcli/query/query_test.go | 5 +- pkg/logproto/indexgateway.pb.go | 50 ++- pkg/logproto/indexgateway.proto | 1 - pkg/logproto/sketch.pb.go | 78 ++-- pkg/logproto/sketch.proto | 1 - pkg/logql/blocker.go | 4 +- pkg/logql/blocker_test.go | 13 +- pkg/logql/downstream.go | 46 ++- pkg/logql/downstream_test.go | 25 +- pkg/logql/engine.go | 27 +- pkg/logql/engine_test.go | 160 +++----- pkg/logql/evaluator.go | 69 +++- pkg/logql/evaluator_test.go | 4 +- pkg/logql/explain_test.go | 10 +- pkg/logql/metrics.go | 12 +- pkg/logql/metrics_test.go | 49 ++- pkg/logql/rangemapper.go | 8 +- pkg/logql/rangemapper_test.go | 24 +- pkg/logql/shardmapper.go | 7 +- pkg/logql/shardmapper_test.go | 12 +- pkg/logql/syntax/parser.go | 8 + pkg/logql/test_utils.go | 12 +- pkg/querier/plan/plan.go | 101 +++++ pkg/querier/plan/plan_test.go | 26 ++ pkg/querier/queryrange/codec.go | 66 +++- pkg/querier/queryrange/codec_test.go | 19 +- pkg/querier/queryrange/downstreamer.go | 21 +- pkg/querier/queryrange/downstreamer_test.go | 58 ++- pkg/querier/queryrange/marshal.go | 24 ++ pkg/querier/queryrange/queryrange.pb.go | 345 ++++++++++++------ pkg/querier/queryrange/queryrange.proto | 2 + pkg/querier/queryrange/querysharding.go | 14 +- pkg/querier/queryrange/querysharding_test.go | 40 +- pkg/querier/queryrange/roundtrip_test.go | 20 +- pkg/querier/queryrange/split_by_interval.go | 2 + pkg/querier/queryrange/split_by_range.go | 16 +- pkg/querier/queryrange/split_by_range_test.go | 26 +- pkg/querier/queryrange/stats.go | 6 +- pkg/querier/queryrange/stats_test.go | 12 +- pkg/querier/worker/util_test.go | 2 +- pkg/ruler/evaluator_local.go | 5 +- 43 files changed, 932 insertions(+), 530 deletions(-) create mode 100644 pkg/querier/plan/plan.go create mode 100644 
pkg/querier/plan/plan_test.go diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go index 45681c36c2c8f..82274ef79fb8d 100644 --- a/pkg/logcli/client/file.go +++ b/pkg/logcli/client/file.go @@ -69,7 +69,7 @@ func (f *FileClient) Query(q string, limit int, t time.Time, direction logproto. ctx = user.InjectOrgID(ctx, f.orgID) - params := logql.NewLiteralParams( + params, err := logql.NewLiteralParams( q, t, t, 0, @@ -78,6 +78,9 @@ func (f *FileClient) Query(q string, limit int, t time.Time, direction logproto. uint32(limit), nil, ) + if err != nil { + return nil, fmt.Errorf("failed to parse query: %w", err) + } query := f.engine.Query(params) @@ -106,7 +109,7 @@ func (f *FileClient) QueryRange(queryStr string, limit int, start, end time.Time ctx = user.InjectOrgID(ctx, f.orgID) - params := logql.NewLiteralParams( + params, err := logql.NewLiteralParams( queryStr, start, end, @@ -116,6 +119,9 @@ func (f *FileClient) QueryRange(queryStr string, limit int, start, end time.Time uint32(limit), nil, ) + if err != nil { + return nil, err + } query := f.engine.Query(params) diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go index 6a71f0979abcf..fc5be5f393cb2 100644 --- a/pkg/logcli/query/query.go +++ b/pkg/logcli/query/query.go @@ -451,7 +451,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string var query logql.Query if q.isInstant() { - query = eng.Query(logql.NewLiteralParams( + params, err := logql.NewLiteralParams( q.QueryString, q.Start, q.Start, @@ -460,9 +460,14 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string q.resultsDirection(), uint32(q.Limit), nil, - )) + ) + if err != nil { + return err + } + + query = eng.Query(params) } else { - query = eng.Query(logql.NewLiteralParams( + params, err := logql.NewLiteralParams( q.QueryString, q.Start, q.End, @@ -471,7 +476,16 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string q.resultsDirection(), 
uint32(q.Limit), nil, - )) + ) + if err != nil { + return err + } + + query = eng.Query(params) + } + + if err != nil { + return err } // execute the query diff --git a/pkg/logcli/query/query_test.go b/pkg/logcli/query/query_test.go index 72886fb84668d..1b4c18f5265e0 100644 --- a/pkg/logcli/query/query_test.go +++ b/pkg/logcli/query/query_test.go @@ -425,7 +425,10 @@ func (t *testQueryClient) Query(_ string, _ int, _ time.Time, _ logproto.Directi func (t *testQueryClient) QueryRange(queryStr string, limit int, from, through time.Time, direction logproto.Direction, step, interval time.Duration, _ bool) (*loghttp.QueryResponse, error) { ctx := user.InjectOrgID(context.Background(), "fake") - params := logql.NewLiteralParams(queryStr, from, through, step, interval, direction, uint32(limit), nil) + params, err := logql.NewLiteralParams(queryStr, from, through, step, interval, direction, uint32(limit), nil) + if err != nil { + return nil, err + } v, err := t.engine.Query(params).Exec(ctx) if err != nil { diff --git a/pkg/logproto/indexgateway.pb.go b/pkg/logproto/indexgateway.pb.go index e8b569ea07323..86b2665e86b17 100644 --- a/pkg/logproto/indexgateway.pb.go +++ b/pkg/logproto/indexgateway.pb.go @@ -6,7 +6,6 @@ package logproto import ( context "context" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -28,31 +27,30 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("pkg/logproto/indexgateway.proto", fileDescriptor_d27585148d0a52c8) } var fileDescriptor_d27585148d0a52c8 = []byte{ - // 372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x4a, 0xfb, 0x30, - 0x1c, 0xc7, 0x1b, 0xf8, 0xf3, 0x47, 0xa3, 0x78, 0x08, 0xc2, 0x46, 0xa7, 0x11, 0xc4, 0x83, 0x5e, - 0x56, 0xd1, 0x17, 0x10, 0x85, 0x95, 0xc1, 0x14, 0x9c, 0xb0, 0xc3, 0x0e, 
0x62, 0x3a, 0x7f, 0xeb, - 0xca, 0xba, 0xa6, 0xb6, 0x29, 0xba, 0x9b, 0x8f, 0xe0, 0x63, 0xf8, 0x10, 0x3e, 0x80, 0xc7, 0x1d, - 0x77, 0x74, 0xdd, 0xc5, 0xe3, 0x1e, 0x41, 0x9a, 0xd0, 0x2d, 0x9b, 0x1d, 0x78, 0x6a, 0xfa, 0xf9, - 0x7e, 0xf3, 0xf9, 0xd1, 0xa4, 0xf8, 0x20, 0xec, 0xbb, 0x96, 0xcf, 0xdd, 0x30, 0xe2, 0x82, 0x5b, - 0x5e, 0xf0, 0x08, 0x2f, 0x2e, 0x13, 0xf0, 0xcc, 0x86, 0x55, 0x89, 0xc8, 0x8e, 0xce, 0x42, 0xc7, - 0xdc, 0x75, 0xb9, 0xcb, 0x55, 0x3b, 0x5b, 0xa9, 0x96, 0x59, 0x59, 0xd2, 0xe4, 0x0b, 0x15, 0x9e, - 0x7d, 0xfc, 0xc3, 0xdb, 0xf5, 0xcc, 0x62, 0x2b, 0x0b, 0xa9, 0x63, 0x7c, 0x9b, 0x40, 0x34, 0x94, - 0x90, 0x54, 0xaa, 0xf3, 0xfe, 0x82, 0x36, 0xe1, 0x29, 0x81, 0x58, 0x98, 0x7b, 0xc5, 0x61, 0x1c, - 0xf2, 0x20, 0x86, 0x53, 0x44, 0x1a, 0x78, 0xcb, 0x06, 0x71, 0xd5, 0x4b, 0x82, 0x7e, 0x13, 0xba, - 0x44, 0xab, 0x6b, 0x38, 0x97, 0xed, 0xaf, 0x49, 0x95, 0xed, 0xd0, 0x20, 0x35, 0xbc, 0x69, 0x83, - 0xb8, 0x83, 0xc8, 0x83, 0x98, 0x98, 0x4b, 0x6d, 0x05, 0x73, 0x53, 0xa5, 0x30, 0x9b, 0x7b, 0xee, - 0x71, 0xa9, 0xc1, 0x1c, 0xf0, 0x6f, 0xd8, 0x00, 0xe2, 0x1a, 0x8f, 0xae, 0x41, 0x44, 0x5e, 0x27, - 0x7b, 0x23, 0xc7, 0x8b, 0x9d, 0x6b, 0x2a, 0xf9, 0x8c, 0xd2, 0x4a, 0x53, 0xf3, 0x3f, 0xe0, 0xb2, - 0x44, 0x2d, 0xe6, 0x27, 0xab, 0x03, 0x4e, 0x56, 0xb6, 0x15, 0x74, 0xfe, 0x30, 0xc1, 0xc6, 0x1b, - 0xd9, 0x87, 0x09, 0x26, 0x62, 0xfd, 0x82, 0xe4, 0xf1, 0x4b, 0x5a, 0x70, 0x41, 0x7a, 0x38, 0x17, - 0x5d, 0xc8, 0x23, 0x6d, 0x71, 0x3f, 0x19, 0x00, 0xd1, 0x06, 0x2a, 0x92, 0x5b, 0xca, 0xbf, 0x83, - 0xdc, 0x70, 0xd9, 0x1e, 0x4d, 0xa8, 0x31, 0x9e, 0x50, 0x63, 0x36, 0xa1, 0xe8, 0x35, 0xa5, 0xe8, - 0x3d, 0xa5, 0xe8, 0x33, 0xa5, 0x68, 0x94, 0x52, 0xf4, 0x95, 0x52, 0xf4, 0x9d, 0x52, 0x63, 0x96, - 0x52, 0xf4, 0x36, 0xa5, 0xc6, 0x68, 0x4a, 0x8d, 0xf1, 0x94, 0x1a, 0xed, 0x23, 0xd7, 0x13, 0xbd, - 0xc4, 0xa9, 0x76, 0xf8, 0xc0, 0x72, 0x23, 0xd6, 0x65, 0x01, 0xb3, 0x7c, 0xde, 0xf7, 0x2c, 0xfd, - 0x4f, 0x75, 0xfe, 0xcb, 0xc7, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0xe4, 0x24, 0x34, - 0x07, 0x03, 
0x00, 0x00, + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x4e, 0xc2, 0x40, + 0x18, 0xc7, 0xef, 0x12, 0x63, 0xf4, 0x34, 0x0e, 0xb7, 0x40, 0x40, 0xcf, 0xc4, 0x38, 0xe8, 0x42, + 0x8d, 0xbe, 0x80, 0xd1, 0x84, 0x86, 0x04, 0x4d, 0xc4, 0x84, 0x81, 0xc1, 0x78, 0xc5, 0x8f, 0xd2, + 0x50, 0x7a, 0xb5, 0xbd, 0x46, 0xd9, 0x7c, 0x04, 0x1f, 0xc3, 0x87, 0xf0, 0x01, 0x1c, 0x19, 0x19, + 0xe5, 0x58, 0x1c, 0x79, 0x04, 0xc3, 0x35, 0x85, 0x03, 0x4b, 0xe2, 0x04, 0xfd, 0xfd, 0x7f, 0xdf, + 0xff, 0x4b, 0xef, 0x4a, 0x0e, 0xc3, 0x9e, 0x6b, 0xf9, 0xc2, 0x0d, 0x23, 0x21, 0x85, 0xe5, 0x05, + 0x4f, 0xf0, 0xea, 0x72, 0x09, 0x2f, 0x7c, 0x50, 0xd1, 0x88, 0xee, 0x99, 0x2c, 0x74, 0x4a, 0xe5, + 0xa5, 0x81, 0xec, 0x4f, 0x2a, 0x9f, 0x7f, 0x6e, 0x90, 0xdd, 0xda, 0xcc, 0xb7, 0x53, 0x9f, 0xd6, + 0x08, 0xb9, 0x4b, 0x20, 0x1a, 0x68, 0x48, 0xcb, 0x95, 0xb9, 0xbf, 0xa0, 0x0d, 0x78, 0x4e, 0x20, + 0x96, 0xa5, 0xfd, 0xfc, 0x30, 0x0e, 0x45, 0x10, 0xc3, 0x19, 0xa6, 0x75, 0xb2, 0x63, 0x83, 0xbc, + 0xee, 0x26, 0x41, 0xaf, 0x01, 0x1d, 0x6a, 0xe8, 0x06, 0xce, 0xca, 0x0e, 0xd6, 0xa4, 0x69, 0xdb, + 0x11, 0xa2, 0x55, 0xb2, 0x6d, 0x83, 0xbc, 0x87, 0xc8, 0x83, 0x98, 0x96, 0x96, 0xec, 0x14, 0x66, + 0x4d, 0xe5, 0xdc, 0x6c, 0xde, 0xf3, 0x40, 0x0a, 0x75, 0xee, 0x80, 0x7f, 0xcb, 0xfb, 0x10, 0x57, + 0x45, 0x74, 0x03, 0x32, 0xf2, 0xda, 0xb3, 0x27, 0x7a, 0xb2, 0x98, 0x5c, 0xa3, 0x64, 0x3b, 0x0a, + 0x2b, 0xa6, 0xd1, 0xff, 0x48, 0x8a, 0x1a, 0x35, 0xb9, 0x9f, 0xac, 0x2e, 0x38, 0x5d, 0x19, 0xcb, + 0x71, 0xfe, 0xb1, 0xc1, 0x26, 0x5b, 0xb3, 0x17, 0x93, 0x5c, 0xc6, 0xe6, 0x05, 0xe9, 0xe3, 0xd7, + 0x34, 0xe7, 0x82, 0xcc, 0x70, 0x5e, 0x74, 0xa9, 0x8f, 0xb4, 0x29, 0xfc, 0xa4, 0x0f, 0xd4, 0x58, + 0x98, 0x92, 0xac, 0xa5, 0xf8, 0x37, 0xc8, 0x1a, 0xae, 0x5a, 0xc3, 0x31, 0x43, 0xa3, 0x31, 0x43, + 0xd3, 0x31, 0xc3, 0x6f, 0x8a, 0xe1, 0x0f, 0xc5, 0xf0, 0x97, 0x62, 0x78, 0xa8, 0x18, 0xfe, 0x56, + 0x0c, 0xff, 0x28, 0x86, 0xa6, 0x8a, 0xe1, 0xf7, 0x09, 0x43, 0xc3, 0x09, 
0x43, 0xa3, 0x09, 0x43, + 0xad, 0x63, 0xd7, 0x93, 0xdd, 0xc4, 0xa9, 0xb4, 0x45, 0xdf, 0x72, 0x23, 0xde, 0xe1, 0x01, 0xb7, + 0x7c, 0xd1, 0xf3, 0x2c, 0xf3, 0x4b, 0x75, 0x36, 0xf5, 0xcf, 0xc5, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x7a, 0x1a, 0x28, 0xb4, 0xf1, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/pkg/logproto/indexgateway.proto b/pkg/logproto/indexgateway.proto index 9271ee9b2b5f4..af34e03a279cb 100644 --- a/pkg/logproto/indexgateway.proto +++ b/pkg/logproto/indexgateway.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package indexgatewaypb; -import "gogoproto/gogo.proto"; import "pkg/logproto/logproto.proto"; option go_package = "github.com/grafana/loki/pkg/logproto"; diff --git a/pkg/logproto/sketch.pb.go b/pkg/logproto/sketch.pb.go index 4a56552d984e8..c555d64d55970 100644 --- a/pkg/logproto/sketch.pb.go +++ b/pkg/logproto/sketch.pb.go @@ -7,7 +7,6 @@ import ( bytes "bytes" encoding_binary "encoding/binary" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" io "io" math "math" @@ -657,47 +656,46 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/sketch.proto", fileDescriptor_7f9fd40e59b87ff3) } var fileDescriptor_7f9fd40e59b87ff3 = []byte{ - // 632 bytes of a gzipped FileDescriptorProto + // 623 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x4f, 0xd4, 0x4e, - 0x14, 0xef, 0xfc, 0x77, 0xff, 0xcb, 0xf2, 0x16, 0x88, 0x8e, 0xc4, 0xac, 0xc5, 0x4c, 0xd6, 0xc6, + 0x14, 0xef, 0xfc, 0x77, 0xff, 0xcb, 0xf2, 0x16, 0x88, 0x8e, 0xc4, 0xd4, 0xc5, 0x4c, 0xd6, 0xc6, 0x28, 0xd1, 0xb8, 0x9b, 0x40, 0x42, 0x38, 0x83, 0x07, 0x12, 0x45, 0x71, 0x20, 0xc6, 0x70, 0x31, - 0xa5, 0x1d, 0xba, 0x93, 0x6d, 0x3b, 0x4d, 0x67, 0x16, 0xf0, 0xe6, 0x27, 0x30, 0xc6, 0x4f, 0xe1, - 0xd5, 0x8f, 0xe0, 0xcd, 0x23, 0x47, 0x8e, 0x52, 0x2e, 0x1e, 0xf9, 0x08, 0x66, 0x66, 0xda, 0x85, - 0x2e, 0x31, 0x7a, 0xda, 0xf7, 0x7e, 0xef, 
0xf7, 0x7e, 0xf3, 0x9b, 0x79, 0x7d, 0x0b, 0xf7, 0xb2, - 0x51, 0x34, 0x88, 0x45, 0x94, 0xe5, 0x42, 0x89, 0x81, 0x1c, 0x31, 0x15, 0x0c, 0xfb, 0x26, 0xc1, - 0xed, 0x0a, 0x76, 0x17, 0x23, 0x11, 0x09, 0xcb, 0xd0, 0x91, 0xad, 0xbb, 0x4b, 0xb5, 0xd6, 0x2a, - 0xb0, 0x45, 0xef, 0x15, 0x2c, 0xbe, 0x19, 0xfb, 0xa9, 0xe2, 0x31, 0xdb, 0x35, 0xa2, 0xdb, 0xbe, - 0xca, 0xf9, 0x09, 0x5e, 0x83, 0xd6, 0x91, 0x1f, 0x8f, 0x99, 0xec, 0xa2, 0x5e, 0x63, 0xb9, 0xb3, - 0x42, 0xfa, 0x93, 0xc6, 0x3a, 0xff, 0x2d, 0x0b, 0x94, 0xc8, 0x69, 0xc9, 0xf6, 0x76, 0xa6, 0xf5, - 0x6c, 0x1d, 0xaf, 0xc3, 0x8c, 0xf4, 0x93, 0x2c, 0xfe, 0xbb, 0xe0, 0xae, 0xa1, 0xd1, 0x8a, 0xee, - 0x7d, 0x42, 0xd3, 0x92, 0x96, 0x81, 0x1f, 0x01, 0x3a, 0xec, 0xa2, 0x1e, 0x5a, 0xee, 0xac, 0x74, - 0xff, 0x24, 0x46, 0xd1, 0x21, 0x7e, 0x00, 0x73, 0x8a, 0x27, 0x4c, 0x2a, 0x3f, 0xc9, 0xde, 0x27, - 0xb2, 0xfb, 0x5f, 0x0f, 0x2d, 0x37, 0x68, 0x67, 0x82, 0x6d, 0x4b, 0xfc, 0x14, 0x5a, 0x09, 0x53, - 0x39, 0x0f, 0xba, 0x0d, 0x63, 0xee, 0xce, 0x95, 0xde, 0x4b, 0xff, 0x80, 0xc5, 0x3b, 0x3e, 0xcf, - 0x69, 0x49, 0xf1, 0x22, 0x58, 0xa8, 0x1f, 0x82, 0x9f, 0xc1, 0x8c, 0x0a, 0x79, 0xc4, 0xa4, 0x2a, - 0xfd, 0xdc, 0xbe, 0xea, 0xdf, 0x7b, 0x6e, 0x0a, 0x5b, 0x0e, 0xad, 0x38, 0xf8, 0x3e, 0xb4, 0xc3, - 0xd0, 0x8e, 0xd0, 0x98, 0x99, 0xdb, 0x72, 0xe8, 0x04, 0xd9, 0x68, 0x43, 0xcb, 0x46, 0xde, 0x77, - 0x04, 0x33, 0x65, 0x3b, 0xbe, 0x05, 0x8d, 0x84, 0xa7, 0x46, 0x1e, 0x51, 0x1d, 0x1a, 0xc4, 0x3f, - 0x31, 0x02, 0x1a, 0xf1, 0x4f, 0x70, 0x0f, 0x3a, 0x81, 0x48, 0xb2, 0x9c, 0x49, 0xc9, 0x45, 0xda, - 0x6d, 0x98, 0xca, 0x75, 0x08, 0xaf, 0xc3, 0x6c, 0x96, 0x8b, 0x80, 0x49, 0xc9, 0xc2, 0x6e, 0xd3, - 0x5c, 0xd5, 0xbd, 0x61, 0xb5, 0xbf, 0xc9, 0x52, 0x95, 0x0b, 0x1e, 0xd2, 0x2b, 0xb2, 0xbb, 0x06, - 0xed, 0x0a, 0xc6, 0x18, 0x9a, 0x09, 0xf3, 0x2b, 0x33, 0x26, 0xc6, 0x77, 0xa1, 0x75, 0xcc, 0x78, - 0x34, 0x54, 0xa5, 0xa1, 0x32, 0xf3, 0xde, 0xc1, 0xc2, 0xa6, 0x18, 0xa7, 0x6a, 0x9b, 0xa7, 0xe5, - 0x63, 0x2d, 0xc2, 0xff, 0x21, 0xcb, 0xd4, 0xd0, 0xb4, 0xcf, 0x53, 0x9b, 0x68, 
0xf4, 0x98, 0x87, - 0xca, 0x3e, 0xc8, 0x3c, 0xb5, 0x09, 0x76, 0xa1, 0x1d, 0xe8, 0x6e, 0x96, 0x4b, 0x33, 0x99, 0x79, - 0x3a, 0xc9, 0xbd, 0x6f, 0x08, 0x9a, 0x7b, 0x22, 0x7b, 0x81, 0x9f, 0x40, 0x23, 0x48, 0xe4, 0xcd, - 0x2f, 0xa1, 0x7e, 0x2e, 0xd5, 0x24, 0xfc, 0x18, 0x9a, 0x31, 0x97, 0xda, 0xe4, 0xd4, 0x98, 0xb5, - 0x52, 0xdf, 0x8c, 0xd9, 0x10, 0xf4, 0x5b, 0x0e, 0x3f, 0x64, 0x2c, 0x8f, 0x45, 0x14, 0x8b, 0xc8, - 0xbc, 0xe5, 0x1c, 0xbd, 0x0e, 0xb9, 0x2b, 0xd0, 0xd4, 0x7c, 0xed, 0x9c, 0x1d, 0xb1, 0xd4, 0x8e, - 0x7e, 0x96, 0xda, 0x44, 0xa3, 0xc6, 0x69, 0x75, 0x1f, 0x93, 0x78, 0x5f, 0x10, 0x80, 0x3e, 0xa9, - 0x5c, 0xb2, 0xd5, 0xa9, 0x25, 0x5b, 0xaa, 0xfb, 0xb1, 0xac, 0x7e, 0x7d, 0xc3, 0xdc, 0xd7, 0xd0, - 0x2a, 0x77, 0xca, 0x83, 0xa6, 0x12, 0xd9, 0xa8, 0xbc, 0xf9, 0x42, 0xbd, 0x99, 0x9a, 0xda, 0x3f, - 0x7c, 0xfc, 0x1b, 0xfb, 0xa7, 0xe7, 0xc4, 0x39, 0x3b, 0x27, 0xce, 0xe5, 0x39, 0x41, 0x1f, 0x0b, - 0x82, 0xbe, 0x16, 0x04, 0xfd, 0x28, 0x08, 0x3a, 0x2d, 0x08, 0xfa, 0x59, 0x10, 0xf4, 0xab, 0x20, - 0xce, 0x65, 0x41, 0xd0, 0xe7, 0x0b, 0xe2, 0x9c, 0x5e, 0x10, 0xe7, 0xec, 0x82, 0x38, 0xfb, 0x0f, - 0x23, 0xae, 0x86, 0xe3, 0x83, 0x7e, 0x20, 0x92, 0x41, 0x94, 0xfb, 0x87, 0x7e, 0xea, 0x0f, 0x62, - 0x31, 0xe2, 0x83, 0xeb, 0xff, 0x36, 0x07, 0x2d, 0xf3, 0xb3, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, - 0xa9, 0x7c, 0xb5, 0x30, 0xbf, 0x04, 0x00, 0x00, + 0x43, 0x3b, 0x74, 0x27, 0xdb, 0x76, 0x9a, 0xce, 0x2c, 0xe0, 0xcd, 0x4f, 0x60, 0x8c, 0x9f, 0xc2, + 0xab, 0x1f, 0xc1, 0x9b, 0x47, 0x8e, 0x1c, 0xa5, 0x5c, 0x3c, 0xf2, 0x11, 0xcc, 0x4c, 0xdb, 0x85, + 0x2e, 0x31, 0x7a, 0xda, 0x79, 0xbf, 0xf7, 0x7b, 0xbf, 0xf9, 0xcd, 0x7b, 0x7d, 0x0b, 0xf7, 0xd2, + 0x51, 0x38, 0x88, 0x64, 0x98, 0x66, 0x52, 0xcb, 0x81, 0x1a, 0x71, 0xed, 0x0f, 0xfb, 0x36, 0xc0, + 0xed, 0x0a, 0xee, 0x2e, 0xd5, 0x48, 0xd5, 0xa1, 0xa0, 0x79, 0xaf, 0x60, 0xf1, 0xcd, 0x98, 0x25, + 0x5a, 0x44, 0x7c, 0xd7, 0x96, 0x6f, 0x33, 0x9d, 0x89, 0x13, 0xbc, 0x06, 0xad, 0x23, 0x16, 0x8d, + 0xb9, 0x72, 0x51, 0xaf, 0xb1, 0xdc, 0x59, 0x21, 0xfd, 0x49, 0x61, 
0x9d, 0xff, 0x96, 0xfb, 0x5a, + 0x66, 0xb4, 0x64, 0x7b, 0x3b, 0xd3, 0x7a, 0x45, 0x1e, 0xaf, 0xc3, 0x8c, 0x62, 0x71, 0x1a, 0xfd, + 0x5d, 0x70, 0xd7, 0xd2, 0x68, 0x45, 0xf7, 0x3e, 0xa1, 0x69, 0xc9, 0x82, 0x81, 0x1f, 0x01, 0x3a, + 0x74, 0x51, 0x0f, 0x2d, 0x77, 0x56, 0xdc, 0x3f, 0x89, 0x51, 0x74, 0x88, 0x1f, 0xc0, 0x9c, 0x16, + 0x31, 0x57, 0x9a, 0xc5, 0xe9, 0xfb, 0x58, 0xb9, 0xff, 0xf5, 0xd0, 0x72, 0x83, 0x76, 0x26, 0xd8, + 0xb6, 0xc2, 0x4f, 0xa1, 0x15, 0x73, 0x9d, 0x09, 0xdf, 0x6d, 0x58, 0x73, 0x77, 0xae, 0xf4, 0x5e, + 0xb2, 0x03, 0x1e, 0xed, 0x30, 0x91, 0xd1, 0x92, 0xe2, 0x85, 0xb0, 0x50, 0xbf, 0x04, 0x3f, 0x83, + 0x19, 0x1d, 0x88, 0x90, 0x2b, 0x5d, 0xfa, 0xb9, 0x7d, 0x55, 0xbf, 0xf7, 0xdc, 0x26, 0xb6, 0x1c, + 0x5a, 0x71, 0xf0, 0x7d, 0x68, 0x07, 0x41, 0x31, 0x2c, 0x6b, 0x66, 0x6e, 0xcb, 0xa1, 0x13, 0x64, + 0xa3, 0x0d, 0xad, 0xe2, 0xe4, 0x7d, 0x47, 0x30, 0x53, 0x96, 0xe3, 0x5b, 0xd0, 0x88, 0x45, 0x62, + 0xe5, 0x11, 0x35, 0x47, 0x8b, 0xb0, 0x13, 0x2b, 0x60, 0x10, 0x76, 0x82, 0x7b, 0xd0, 0xf1, 0x65, + 0x9c, 0x66, 0x5c, 0x29, 0x21, 0x13, 0xb7, 0x61, 0x33, 0xd7, 0x21, 0xbc, 0x0e, 0xb3, 0x69, 0x26, + 0x7d, 0xae, 0x14, 0x0f, 0xdc, 0xa6, 0x7d, 0x6a, 0xf7, 0x86, 0xd5, 0xfe, 0x26, 0x4f, 0x74, 0x26, + 0x45, 0x40, 0xaf, 0xc8, 0xdd, 0x35, 0x68, 0x57, 0x30, 0xc6, 0xd0, 0x8c, 0x39, 0xab, 0xcc, 0xd8, + 0x33, 0xbe, 0x0b, 0xad, 0x63, 0x2e, 0xc2, 0xa1, 0x2e, 0x0d, 0x95, 0x91, 0xf7, 0x0e, 0x16, 0x36, + 0xe5, 0x38, 0xd1, 0xdb, 0x22, 0x29, 0x9b, 0xb5, 0x08, 0xff, 0x07, 0x3c, 0xd5, 0x43, 0x5b, 0x3e, + 0x4f, 0x8b, 0xc0, 0xa0, 0xc7, 0x22, 0xd0, 0x45, 0x43, 0xe6, 0x69, 0x11, 0xe0, 0x2e, 0xb4, 0x7d, + 0x53, 0xcd, 0x33, 0x65, 0x27, 0x33, 0x4f, 0x27, 0xb1, 0xf7, 0x0d, 0x41, 0x73, 0x4f, 0xa6, 0x2f, + 0xf0, 0x13, 0x68, 0xf8, 0xb1, 0xba, 0xf9, 0x25, 0xd4, 0xef, 0xa5, 0x86, 0x84, 0x1f, 0x43, 0x33, + 0x12, 0xca, 0x98, 0x9c, 0x1a, 0xb3, 0x51, 0xea, 0xdb, 0x31, 0x5b, 0x82, 0xe9, 0xe5, 0xf0, 0x43, + 0xca, 0xb3, 0x48, 0x86, 0x91, 0x0c, 0x6d, 0x2f, 0xe7, 0xe8, 0x75, 0xa8, 0xbb, 0x02, 0x4d, 0xc3, + 0x37, 
0xce, 0xf9, 0x11, 0x4f, 0x8a, 0xd1, 0xcf, 0xd2, 0x22, 0x30, 0xa8, 0x75, 0x5a, 0xbd, 0xc7, + 0x06, 0xde, 0x17, 0x04, 0x60, 0x6e, 0x2a, 0x97, 0x6c, 0x75, 0x6a, 0xc9, 0x96, 0xea, 0x7e, 0x0a, + 0x56, 0xbf, 0xbe, 0x61, 0xdd, 0xd7, 0xd0, 0x2a, 0x77, 0xca, 0x83, 0xa6, 0x96, 0xe9, 0xa8, 0x7c, + 0xf9, 0x42, 0xbd, 0x98, 0xda, 0xdc, 0x3f, 0x7c, 0xfc, 0x1b, 0xfb, 0xa7, 0xe7, 0xc4, 0x39, 0x3b, + 0x27, 0xce, 0xe5, 0x39, 0x41, 0x1f, 0x73, 0x82, 0xbe, 0xe6, 0x04, 0xfd, 0xc8, 0x09, 0x3a, 0xcd, + 0x09, 0xfa, 0x99, 0x13, 0xf4, 0x2b, 0x27, 0xce, 0x65, 0x4e, 0xd0, 0xe7, 0x0b, 0xe2, 0x9c, 0x5e, + 0x10, 0xe7, 0xec, 0x82, 0x38, 0xfb, 0x0f, 0x43, 0xa1, 0x87, 0xe3, 0x83, 0xbe, 0x2f, 0xe3, 0x41, + 0x98, 0xb1, 0x43, 0x96, 0xb0, 0x41, 0x24, 0x47, 0x62, 0x70, 0xfd, 0xdf, 0xe6, 0xa0, 0x65, 0x7f, + 0x56, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x24, 0x9c, 0x74, 0xb7, 0xa9, 0x04, 0x00, 0x00, } func (this *QuantileSketchMatrix) Equal(that interface{}) bool { diff --git a/pkg/logproto/sketch.proto b/pkg/logproto/sketch.proto index e84deaf20d4c8..d8ffeb0110340 100644 --- a/pkg/logproto/sketch.proto +++ b/pkg/logproto/sketch.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package logproto; -import "gogoproto/gogo.proto"; import "pkg/logproto/logproto.proto"; option go_package = "github.com/grafana/loki/pkg/logproto"; diff --git a/pkg/logql/blocker.go b/pkg/logql/blocker.go index cbfdc6bf49e3b..9a07113c40dd3 100644 --- a/pkg/logql/blocker.go +++ b/pkg/logql/blocker.go @@ -33,8 +33,8 @@ func (qb *queryBlocker) isBlocked(ctx context.Context, tenant string) bool { return false } - query := qb.q.params.Query() - typ, err := QueryType(query) + query := qb.q.params.QueryString() + typ, err := QueryType(qb.q.params.GetExpression()) if err != nil { typ = "unknown" } diff --git a/pkg/logql/blocker_test.go b/pkg/logql/blocker_test.go index e0dc00bf622e7..9fa586a02db80 100644 --- a/pkg/logql/blocker_test.go +++ b/pkg/logql/blocker_test.go @@ -145,15 +145,10 @@ func TestEngine_ExecWithBlockedQueries(t *testing.T) { 
t.Run(test.name, func(t *testing.T) { limits.blockedQueries = test.blocked - q := eng.Query(LiteralParams{ - qs: test.q, - start: time.Unix(0, 0), - end: time.Unix(100000, 0), - step: 60 * time.Second, - direction: logproto.FORWARD, - limit: 1000, - }) - _, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) + params, err := NewLiteralParams(test.q, time.Unix(0, 0), time.Unix(100000, 0), 60*time.Second, 0, logproto.FORWARD, 1000, nil) + require.NoError(t, err) + q := eng.Query(params) + _, err = q.Exec(user.InjectOrgID(context.Background(), "fake")) if test.expectedErr == nil { require.NoError(t, err) diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go index 3944b4fc492a7..2cd706c812f6a 100644 --- a/pkg/logql/downstream.go +++ b/pkg/logql/downstream.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/grafana/loki/pkg/iter" + "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/metadata" @@ -62,15 +63,12 @@ func NewDownstreamEngine(opts EngineOpts, downstreamable Downstreamable, limits func (ng *DownstreamEngine) Opts() EngineOpts { return ng.opts } // Query constructs a Query -func (ng *DownstreamEngine) Query(ctx context.Context, p Params, mapped syntax.Expr) Query { +func (ng *DownstreamEngine) Query(ctx context.Context, p Params) Query { return &query{ logger: ng.logger, params: p, evaluator: NewDownstreamEvaluator(ng.downstreamable.Downstreamer(ctx)), - parse: func(_ context.Context, _ string) (syntax.Expr, error) { - return mapped, nil - }, - limits: ng.limits, + limits: ng.limits, } } @@ -189,9 +187,7 @@ type Downstreamable interface { } type DownstreamQuery struct { - Expr syntax.Expr Params Params - Shards Shards } // Downstreamer is an interface for deferring responsibility for query execution. 
@@ -268,9 +264,10 @@ func (ev *DownstreamEvaluator) NewStepEvaluator( shards = append(shards, *e.shard) } results, err := ev.Downstream(ctx, []DownstreamQuery{{ - Expr: e.SampleExpr, - Params: params, - Shards: shards, + Params: ParamsWithShardsOverride{ + Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: e.SampleExpr}, + ShardsOverride: Shards(shards).Encode(), + }, }}) if err != nil { return nil, err @@ -282,11 +279,10 @@ func (ev *DownstreamEvaluator) NewStepEvaluator( var queries []DownstreamQuery for cur != nil { qry := DownstreamQuery{ - Expr: cur.DownstreamSampleExpr.SampleExpr, - Params: params, + Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: cur.DownstreamSampleExpr.SampleExpr}, } if shard := cur.DownstreamSampleExpr.shard; shard != nil { - qry.Shards = Shards{*shard} + qry.Params = ParamsWithShardsOverride{Params: qry.Params, ShardsOverride: Shards{*shard}.Encode()} } queries = append(queries, qry) cur = cur.next @@ -304,7 +300,7 @@ func (ev *DownstreamEvaluator) NewStepEvaluator( level.Warn(util_log.Logger).Log( "msg", "could not extract StepEvaluator", "err", err, - "expr", queries[i].Expr.String(), + "expr", queries[i].Params.GetExpression().String(), ) return nil, err } @@ -332,25 +328,25 @@ func (ev *DownstreamEvaluator) NewIterator( shards = append(shards, *e.shard) } results, err := ev.Downstream(ctx, []DownstreamQuery{{ - Expr: e.LogSelectorExpr, - Params: params, - Shards: shards, + Params: ParamsWithShardsOverride{ + Params: ParamsWithExpressionOverride{Params: params, ExpressionOverride: e.LogSelectorExpr}, + ShardsOverride: shards.Encode(), + }, }}) if err != nil { return nil, err } - return ResultIterator(results[0], params) + return ResultIterator(results[0], params.Direction()) case *ConcatLogSelectorExpr: cur := e var queries []DownstreamQuery for cur != nil { qry := DownstreamQuery{ - Expr: cur.DownstreamLogSelectorExpr.LogSelectorExpr, - Params: params, + Params: 
ParamsWithExpressionOverride{Params: params, ExpressionOverride: cur.DownstreamLogSelectorExpr.LogSelectorExpr}, } if shard := cur.DownstreamLogSelectorExpr.shard; shard != nil { - qry.Shards = Shards{*shard} + qry.Params = ParamsWithShardsOverride{Params: qry.Params, ShardsOverride: Shards{*shard}.Encode()} } queries = append(queries, qry) cur = cur.next @@ -363,12 +359,12 @@ func (ev *DownstreamEvaluator) NewIterator( xs := make([]iter.EntryIterator, 0, len(results)) for i, res := range results { - iter, err := ResultIterator(res, params) + iter, err := ResultIterator(res, params.Direction()) if err != nil { level.Warn(util_log.Logger).Log( "msg", "could not extract Iterator", "err", err, - "expr", queries[i].Expr.String(), + "expr", queries[i].Params.GetExpression().String(), ) } xs = append(xs, iter) @@ -452,10 +448,10 @@ func NewResultStepEvaluator(res logqlmodel.Result, params Params) (StepEvaluator } // ResultIterator coerces a downstream streams result into an iter.EntryIterator -func ResultIterator(res logqlmodel.Result, params Params) (iter.EntryIterator, error) { +func ResultIterator(res logqlmodel.Result, direction logproto.Direction) (iter.EntryIterator, error) { streams, ok := res.Data.(logqlmodel.Streams) if !ok { return nil, fmt.Errorf("unexpected type (%s) for ResultIterator; expected %s", res.Data.Type(), logqlmodel.ValueTypeStreams) } - return iter.NewStreamsIterator(streams, params.Direction()), nil + return iter.NewStreamsIterator(streams, direction), nil } diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go index c5f54b9e1c056..0f4d1cd09984d 100644 --- a/pkg/logql/downstream_test.go +++ b/pkg/logql/downstream_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/syntax" ) var nilShardMetrics = NewShardMapperMetrics(nil) @@ -69,7 +70,7 @@ func TestMappingEquivalence(t *testing.T) { sharded := NewDownstreamEngine(opts, 
MockDownstreamer{regular}, NoLimits, log.NewNopLogger()) t.Run(tc.query, func(t *testing.T) { - params := NewLiteralParams( + params, err := NewLiteralParams( tc.query, start, end, @@ -79,14 +80,16 @@ func TestMappingEquivalence(t *testing.T) { uint32(limit), nil, ) + require.NoError(t, err) + qry := regular.Query(params) ctx := user.InjectOrgID(context.Background(), "fake") mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics) - _, _, mapped, err := mapper.Parse(tc.query) + _, _, mapped, err := mapper.Parse(params.GetExpression()) require.Nil(t, err) - shardedQry := sharded.Query(ctx, params, mapped) + shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: mapped}) res, err := qry.Exec(ctx) require.Nil(t, err) @@ -135,7 +138,7 @@ func TestShardCounter(t *testing.T) { sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger()) t.Run(tc.query, func(t *testing.T) { - params := NewLiteralParams( + params, err := NewLiteralParams( tc.query, start, end, @@ -145,13 +148,14 @@ func TestShardCounter(t *testing.T) { uint32(limit), nil, ) + require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), "fake") mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics) - noop, _, mapped, err := mapper.Parse(tc.query) - require.Nil(t, err) + noop, _, mapped, err := mapper.Parse(params.GetExpression()) + require.NoError(t, err) - shardedQry := sharded.Query(ctx, params, mapped) + shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: mapped}) shardedRes, err := shardedQry.Exec(ctx) require.Nil(t, err) @@ -393,7 +397,7 @@ func TestRangeMappingEquivalence(t *testing.T) { t.Run(tc.query, func(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "fake") - params := NewLiteralParams( + params, err := NewLiteralParams( tc.query, start, end, @@ -403,6 +407,7 @@ func TestRangeMappingEquivalence(t *testing.T) { uint32(limit), nil, ) + 
require.NoError(t, err) // Regular engine qry := regularEngine.Query(params) @@ -412,12 +417,12 @@ func TestRangeMappingEquivalence(t *testing.T) { // Downstream engine - split by range rangeMapper, err := NewRangeMapper(tc.splitByInterval, nilRangeMetrics, NewMapperStats()) require.Nil(t, err) - noop, rangeExpr, err := rangeMapper.Parse(tc.query) + noop, rangeExpr, err := rangeMapper.Parse(syntax.MustParseExpr(tc.query)) require.Nil(t, err) require.False(t, noop, "downstream engine cannot execute noop") - rangeQry := downstreamEngine.Query(ctx, params, rangeExpr) + rangeQry := downstreamEngine.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: rangeExpr}) rangeRes, err := rangeQry.Exec(ctx) require.Nil(t, err) diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index af680a33b9a97..e04cf1dcffa71 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -160,12 +160,9 @@ func NewEngine(opts EngineOpts, q Querier, l Limits, logger log.Logger) *Engine // Query creates a new LogQL query. Instant/Range type is derived from the parameters. 
func (ng *Engine) Query(params Params) Query { return &query{ - logger: ng.logger, - params: params, - evaluator: ng.evaluatorFactory, - parse: func(_ context.Context, query string) (syntax.Expr, error) { - return syntax.ParseExpr(query) - }, + logger: ng.logger, + params: params, + evaluator: ng.evaluatorFactory, record: true, logExecQuery: ng.opts.LogExecutingQuery, limits: ng.limits, @@ -181,7 +178,6 @@ type Query interface { type query struct { logger log.Logger params Params - parse func(context.Context, string) (syntax.Expr, error) limits Limits evaluator EvaluatorFactory record bool @@ -211,7 +207,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { sp.LogKV( "type", GetRangeType(q.params), - "query", q.params.Query(), + "query", q.params.QueryString(), "start", q.params.Start(), "end", q.params.End(), "step", q.params.Step(), @@ -219,11 +215,11 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) { ) if q.logExecQuery { - queryHash := util.HashedQuery(q.params.Query()) + queryHash := util.HashedQuery(q.params.QueryString()) if GetRangeType(q.params) == InstantType { - level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "instant", "query", q.params.Query(), "query_hash", queryHash) + level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "instant", "query", q.params.QueryString(), "query_hash", queryHash) } else { - level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "range", "query", q.params.Query(), "length", q.params.End().Sub(q.params.Start()), "step", q.params.Step(), "query_hash", queryHash) + level.Info(logutil.WithContext(ctx, q.logger)).Log("msg", "executing query", "type", "range", "query", q.params.QueryString(), "length", q.params.End().Sub(q.params.Start()), "step", q.params.Step(), "query_hash", queryHash) } } @@ -263,16 +259,11 @@ func (q *query) Eval(ctx context.Context) (promql_parser.Value, error) { ctx, 
cancel := context.WithTimeout(ctx, queryTimeout) defer cancel() - expr, err := q.parse(ctx, q.params.Query()) - if err != nil { - return nil, err - } - if q.checkBlocked(ctx, tenants) { return nil, logqlmodel.ErrBlocked } - switch e := expr.(type) { + switch e := q.params.GetExpression().(type) { case syntax.SampleExpr: value, err := q.evalSample(ctx, e) return value, err @@ -364,7 +355,7 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_ if GetRangeType(q.params) == InstantType { sortByValue, err := Sortable(q.params) if err != nil { - return nil, fmt.Errorf("fail to check Sortable, logql: %s ,err: %s", q.params.Query(), err) + return nil, fmt.Errorf("fail to check Sortable, logql: %s ,err: %s", q.params.QueryString(), err) } if !sortByValue { sort.Slice(vec, func(i, j int) bool { return labels.Compare(vec[i].Metric, vec[j].Metric) < 0 }) diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index 548400644a31e..e0b6ab3dff2ae 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -129,13 +129,9 @@ func TestEngine_LogsRateUnwrap(t *testing.T) { t.Parallel() eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger()) - q := eng.Query(LiteralParams{ - qs: test.qs, - start: test.ts, - end: test.ts, - direction: test.direction, - limit: test.limit, - }) + params, err := NewLiteralParams(test.qs, test.ts, test.ts, 0, 0, test.direction, test.limit, nil) + require.NoError(t, err) + q := eng.Query(params) res, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) if expectedError, ok := test.expected.(error); ok { assert.Equal(t, expectedError.Error(), err.Error()) @@ -960,13 +956,10 @@ func TestEngine_LogsInstantQuery(t *testing.T) { t.Parallel() eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger()) - q := eng.Query(LiteralParams{ - qs: test.qs, - start: test.ts, - end: test.ts, - direction: 
test.direction, - limit: test.limit, - }) + + params, err := NewLiteralParams(test.qs, test.ts, test.ts, 0, 0, test.direction, test.limit, nil) + require.NoError(t, err) + q := eng.Query(params) res, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) if expectedError, ok := test.expected.(error); ok { assert.Equal(t, expectedError.Error(), err.Error()) @@ -2266,15 +2259,9 @@ func TestEngine_RangeQuery(t *testing.T) { eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger()) - q := eng.Query(LiteralParams{ - qs: test.qs, - start: test.start, - end: test.end, - step: test.step, - interval: test.interval, - direction: test.direction, - limit: test.limit, - }) + params, err := NewLiteralParams(test.qs, test.start, test.end, test.step, test.interval, test.direction, test.limit, nil) + require.NoError(t, err) + q := eng.Query(params) res, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) if err != nil { t.Fatal(err) @@ -2302,13 +2289,11 @@ func TestEngine_Stats(t *testing.T) { eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger()) queueTime := 2 * time.Nanosecond - q := eng.Query(LiteralParams{ - qs: `{foo="bar"}`, - start: time.Now(), - end: time.Now(), - direction: logproto.BACKWARD, - limit: 1000, - }) + + params, err := NewLiteralParams(`{foo="bar"}`, time.Now(), time.Now(), 0, 0, logproto.FORWARD, 1000, nil) + require.NoError(t, err) + q := eng.Query(params) + ctx := context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime) r, err := q.Exec(user.InjectOrgID(ctx, "fake")) require.NoError(t, err) @@ -2338,13 +2323,9 @@ func (metaQuerier) SelectSamples(ctx context.Context, _ SelectSampleParams) (ite func TestEngine_Metadata(t *testing.T) { eng := NewEngine(EngineOpts{}, &metaQuerier{}, NoLimits, log.NewNopLogger()) - q := eng.Query(LiteralParams{ - qs: `{foo="bar"}`, - start: time.Now(), - end: time.Now(), - direction: logproto.BACKWARD, - limit: 
1000, - }) + params, err := NewLiteralParams(`{foo="bar"}`, time.Now(), time.Now(), 0, 0, logproto.BACKWARD, 1000, nil) + require.NoError(t, err) + q := eng.Query(params) r, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) require.NoError(t, err) @@ -2353,51 +2334,17 @@ func TestEngine_Metadata(t *testing.T) { }, r.Headers) } -func TestEngine_LogsInstantQuery_IllegalLogql(t *testing.T) { - eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger()) - - queueTime := 2 * time.Nanosecond - illegalVector := `vector(abc)` - q := eng.Query(LiteralParams{ - qs: illegalVector, - start: time.Now(), - end: time.Now(), - step: time.Second * 30, - interval: time.Second * 30, - direction: logproto.BACKWARD, - limit: 1000, - }) - expectErr := logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER", 1, 8) - ctx := context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime) - _, err := q.Exec(user.InjectOrgID(ctx, "fake")) - - require.EqualError(t, err, expectErr.Error()) - - qry, ok := q.(*query) - require.Equal(t, ok, true) - vectorExpr := syntax.NewVectorExpr(illegalVector) - - _, err = qry.evalSample(ctx, vectorExpr) - expectEvalSampleErr := logqlmodel.NewParseError("unable to parse vectorExpr as a float: strconv.ParseFloat: parsing \"vector(abc)\": invalid syntax", 0, 0) - require.EqualError(t, err, expectEvalSampleErr.Error()) -} - func TestEngine_LogsInstantQuery_Vector(t *testing.T) { eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger()) now := time.Now() queueTime := 2 * time.Nanosecond logqlVector := `vector(5)` - q := eng.Query(LiteralParams{ - qs: logqlVector, - start: now, - end: now, - step: 0, - interval: time.Second * 30, - direction: logproto.BACKWARD, - limit: 1000, - }) + + params, err := NewLiteralParams(logqlVector, now, now, 0, time.Second*30, logproto.BACKWARD, 1000, nil) + require.NoError(t, err) + q := eng.Query(params) ctx := 
context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime) - _, err := q.Exec(user.InjectOrgID(ctx, "fake")) + _, err = q.Exec(user.InjectOrgID(ctx, "fake")) require.NoError(t, err) @@ -2472,14 +2419,11 @@ func TestStepEvaluator_Error(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { eng := NewEngine(EngineOpts{}, tc.querier, NoLimits, log.NewNopLogger()) - q := eng.Query(LiteralParams{ - qs: tc.qs, - start: time.Unix(0, 0), - end: time.Unix(180, 0), - step: 1 * time.Second, - limit: 1, - }) - _, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) + + params, err := NewLiteralParams(tc.qs, time.Unix(0, 0), time.Unix(180, 0), 1*time.Second, 0, logproto.BACKWARD, 1, nil) + require.NoError(t, err) + q := eng.Query(params) + _, err = q.Exec(user.InjectOrgID(context.Background(), "fake")) require.Equal(t, tc.err, err) }) } @@ -2502,15 +2446,10 @@ func TestEngine_MaxSeries(t *testing.T) { {`avg(count_over_time({app=~"foo|bar"} |~".+bar" [1m]))`, logproto.FORWARD, false}, } { t.Run(test.qs, func(t *testing.T) { - q := eng.Query(LiteralParams{ - qs: test.qs, - start: time.Unix(0, 0), - end: time.Unix(100000, 0), - step: 60 * time.Second, - direction: test.direction, - limit: 1000, - }) - _, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) + params, err := NewLiteralParams(test.qs, time.Unix(0, 0), time.Unix(100000, 0), 60*time.Second, 0, test.direction, 1000, nil) + require.NoError(t, err) + q := eng.Query(params) + _, err = q.Exec(user.InjectOrgID(context.Background(), "fake")) if test.expectLimitErr { require.NotNil(t, err) require.True(t, errors.Is(err, logqlmodel.ErrLimit)) @@ -2534,15 +2473,11 @@ func TestEngine_MaxRangeInterval(t *testing.T) { {`topk(1,rate({app=~"foo|bar"}[12h]) / (rate({app="baz"}[23h]) + rate({app="fiz"}[25h])))`, logproto.FORWARD, true}, } { t.Run(test.qs, func(t *testing.T) { - q := eng.Query(LiteralParams{ - qs: test.qs, - start: time.Unix(0, 0), - end: time.Unix(100000, 0), - step: 60 
* time.Second, - direction: test.direction, - limit: 1000, - }) - _, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) + params, err := NewLiteralParams(test.qs, time.Unix(0, 0), time.Unix(100000, 0), 60*time.Second, 0, test.direction, 1000, nil) + require.NoError(t, err) + q := eng.Query(params) + + _, err = q.Exec(user.InjectOrgID(context.Background(), "fake")) if test.expectLimitErr { require.Error(t, err) require.ErrorIs(t, err, logqlmodel.ErrIntervalLimit) @@ -2605,14 +2540,10 @@ func benchmarkRangeQuery(testsize int64, b *testing.B) { {`bottomk(2,rate(({app=~"foo|bar"} |~".+bar")[1m]))`, logproto.FORWARD}, {`bottomk(3,rate(({app=~"foo|bar"} |~".+bar")[1m])) without (app)`, logproto.FORWARD}, } { - q := eng.Query(LiteralParams{ - qs: test.qs, - start: start, - end: end, - step: 60 * time.Second, - direction: test.direction, - limit: 1000, - }) + params, err := NewLiteralParams(test.qs, start, end, 60*time.Second, 0, logproto.BACKWARD, 1000, nil) + require.NoError(b, err) + q := eng.Query(params) + res, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) if err != nil { b.Fatal(err) @@ -2640,8 +2571,13 @@ func TestHashingStability(t *testing.T) { buf := bytes.NewBufferString("") logger := log.NewLogfmtLogger(buf) eng := NewEngine(EngineOpts{LogExecutingQuery: true}, getLocalQuerier(4), NoLimits, logger) + + parsed, err := syntax.ParseExpr(params.QueryString()) + require.NoError(t, err) + params.queryExpr = parsed + query := eng.Query(params) - _, err := query.Exec(ctx) + _, err = query.Exec(ctx) require.NoError(t, err) return buf.String() } @@ -2668,7 +2604,7 @@ func TestHashingStability(t *testing.T) { {`sum by(query_hash) (count_over_time({app="myapp",env="myenv"} |= "error" |= "metrics.go" | logfmt [10s]))`}, {`sum (count_over_time({app="myapp",env="myenv"} |= "error" |= "metrics.go" | logfmt [10s])) by(query_hash)`}, } { - params.qs = test.qs + params.queryString = test.qs expectedQueryHash := util.HashedQuery(test.qs) // check that 
both places will end up having the same query hash, even though they're emitting different log lines. diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index 0c0dba2cad3d5..fdb9190956a5b 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -31,7 +31,7 @@ var ( // Params details the parameters associated with a loki request type Params interface { - Query() string + QueryString() string Start() time.Time End() time.Time Step() time.Duration @@ -39,6 +39,7 @@ type Params interface { Limit() uint32 Direction() logproto.Direction Shards() []string + GetExpression() syntax.Expr } func NewLiteralParams( @@ -48,33 +49,41 @@ func NewLiteralParams( direction logproto.Direction, limit uint32, shards []string, -) LiteralParams { - return LiteralParams{ - qs: qs, - start: start, - end: end, - step: step, - interval: interval, - direction: direction, - limit: limit, - shards: shards, - } +) (LiteralParams, error) { + p := LiteralParams{ + queryString: qs, + start: start, + end: end, + step: step, + interval: interval, + direction: direction, + limit: limit, + shards: shards, + } + var err error + p.queryExpr, err = syntax.ParseExpr(qs) + return p, err + } // LiteralParams impls Params type LiteralParams struct { - qs string + queryString string start, end time.Time step, interval time.Duration direction logproto.Direction limit uint32 shards []string + queryExpr syntax.Expr } func (p LiteralParams) Copy() LiteralParams { return p } // String impls Params -func (p LiteralParams) Query() string { return p.qs } +func (p LiteralParams) QueryString() string { return p.queryString } + +// GetExpression impls Params +func (p LiteralParams) GetExpression() syntax.Expr { return p.queryExpr } // Start impls Params func (p LiteralParams) Start() time.Time { return p.start } @@ -105,12 +114,38 @@ func GetRangeType(q Params) QueryRangeType { return RangeType } +// ParamsWithExpressionOverride overrides the query expression so that the query +// string and the 
expression can differ. This is useful for query planning +// when the plan may not match externally available logql syntax +type ParamsWithExpressionOverride struct { + Params + ExpressionOverride syntax.Expr +} + +// GetExpression returns the parsed expression of the query. +func (p ParamsWithExpressionOverride) GetExpression() syntax.Expr { + return p.ExpressionOverride +} + +// ParamsWithShardsOverride overrides the shards. Since the backing +// implementation of the Params interface is unknown they are embedded and the +// original shards are shadowed. +type ParamsWithShardsOverride struct { + Params + ShardsOverride []string +} + +// Shards returns the overriding shards. +func (p ParamsWithShardsOverride) Shards() []string { + return p.ShardsOverride +} + // Sortable logql contain sort or sort_desc. func Sortable(q Params) (bool, error) { var sortable bool - expr, err := syntax.ParseSampleExpr(q.Query()) - if err != nil { - return false, err + expr, ok := q.GetExpression().(syntax.SampleExpr) + if !ok { + return false, errors.New("only sample expression supported") } expr.Walk(func(e syntax.Expr) { rangeExpr, ok := e.(*syntax.VectorAggregationExpr) diff --git a/pkg/logql/evaluator_test.go b/pkg/logql/evaluator_test.go index 1bec3d9c67d68..e31d587252066 100644 --- a/pkg/logql/evaluator_test.go +++ b/pkg/logql/evaluator_test.go @@ -44,14 +44,14 @@ func TestDefaultEvaluator_DivideByZero(t *testing.T) { } func TestDefaultEvaluator_Sortable(t *testing.T) { logqlSort := `sort(rate(({app=~"foo|bar"} |~".+bar")[1m])) ` - sortable, err := Sortable(LiteralParams{qs: logqlSort}) + sortable, err := Sortable(LiteralParams{queryString: logqlSort, queryExpr: syntax.MustParseExpr(logqlSort)}) if err != nil { t.Fatal(err) } require.Equal(t, true, sortable) logqlSum := `sum(rate(({app=~"foo|bar"} |~".+bar")[1m])) ` - sortableSum, err := Sortable(LiteralParams{qs: logqlSum}) + sortableSum, err := Sortable(LiteralParams{queryString: logqlSum, queryExpr: 
syntax.MustParseExpr(logqlSum)}) if err != nil { t.Fatal(err) } diff --git a/pkg/logql/explain_test.go b/pkg/logql/explain_test.go index a54ffa5916f2c..5ae2f840e1c88 100644 --- a/pkg/logql/explain_test.go +++ b/pkg/logql/explain_test.go @@ -29,14 +29,14 @@ func TestExplain(t *testing.T) { downEv := &DownstreamEvaluator{Downstreamer: MockDownstreamer{regular}, defaultEvaluator: defaultEv} mapper := NewShardMapper(ConstantShards(4), nilShardMetrics) - _, _, expr, err := mapper.Parse(query) + _, _, expr, err := mapper.Parse(syntax.MustParseExpr(query)) require.NoError(t, err) params := LiteralParams{ - qs: query, - start: time.Unix(60, 0), - end: time.Unix(60, 0), - limit: 1000, + queryString: query, + start: time.Unix(60, 0), + end: time.Unix(60, 0), + limit: 1000, } ev, err := downEv.NewStepEvaluator(ctx, downEv, expr.(syntax.SampleExpr), params) diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go index 94a4c2f9dd408..9db8ee96e4ed4 100644 --- a/pkg/logql/metrics.go +++ b/pkg/logql/metrics.go @@ -98,7 +98,7 @@ func RecordRangeAndInstantQueryMetrics( latencyType = latencyTypeFast returnedLines = 0 ) - queryType, err := QueryType(p.Query()) + queryType, err := QueryType(p.GetExpression()) if err != nil { level.Warn(logger).Log("msg", "error parsing query type", "err", err) } @@ -119,8 +119,8 @@ func RecordRangeAndInstantQueryMetrics( logValues = append(logValues, []interface{}{ "latency", latencyType, // this can be used to filter log lines. 
- "query", p.Query(), - "query_hash", util.HashedQuery(p.Query()), + "query", p.QueryString(), + "query_hash", util.HashedQuery(p.QueryString()), "query_type", queryType, "range_type", rt, "length", p.End().Sub(p.Start()), @@ -373,11 +373,7 @@ func recordUsageStats(queryType string, stats logql_stats.Result) { } } -func QueryType(query string) (string, error) { - expr, err := syntax.ParseExpr(query) - if err != nil { - return "", err - } +func QueryType(expr syntax.Expr) (string, error) { switch e := expr.(type) { case syntax.SampleExpr: return QueryTypeMetric, nil diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go index 06d4e2699494e..6d07040bb802a 100644 --- a/pkg/logql/metrics_test.go +++ b/pkg/logql/metrics_test.go @@ -16,6 +16,7 @@ import ( "github.com/uber/jaeger-client-go" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/util" @@ -25,30 +26,25 @@ import ( func TestQueryType(t *testing.T) { tests := []struct { - name string - query string - want string - wantErr bool + name string + query string + want string }{ - {"bad", "ddd", "", true}, - {"limited", `{app="foo"}`, QueryTypeLimited, false}, - {"limited multi label", `{app="foo" ,fuzz=~"foo"}`, QueryTypeLimited, false}, - {"limited with parser", `{app="foo" ,fuzz=~"foo"} | logfmt`, QueryTypeLimited, false}, - {"filter", `{app="foo"} |= "foo"`, QueryTypeFilter, false}, - {"filter string extracted label", `{app="foo"} | json | foo="a"`, QueryTypeFilter, false}, - {"filter duration", `{app="foo"} | json | duration > 5s`, QueryTypeFilter, false}, - {"metrics", `rate({app="foo"} |= "foo"[5m])`, QueryTypeMetric, false}, - {"metrics binary", `rate({app="foo"} |= "foo"[5m]) + count_over_time({app="foo"} |= "foo"[5m]) / rate({app="foo"} |= "foo"[5m]) `, QueryTypeMetric, false}, - {"filters", `{app="foo"} |= "foo" |= "f" != "b"`, QueryTypeFilter, 
false}, - {"filters and labels filters", `{app="foo"} |= "foo" |= "f" != "b" | json | a > 5`, QueryTypeFilter, false}, + {"limited", `{app="foo"}`, QueryTypeLimited}, + {"limited multi label", `{app="foo" ,fuzz=~"foo"}`, QueryTypeLimited}, + {"limited with parser", `{app="foo" ,fuzz=~"foo"} | logfmt`, QueryTypeLimited}, + {"filter", `{app="foo"} |= "foo"`, QueryTypeFilter}, + {"filter string extracted label", `{app="foo"} | json | foo="a"`, QueryTypeFilter}, + {"filter duration", `{app="foo"} | json | duration > 5s`, QueryTypeFilter}, + {"metrics", `rate({app="foo"} |= "foo"[5m])`, QueryTypeMetric}, + {"metrics binary", `rate({app="foo"} |= "foo"[5m]) + count_over_time({app="foo"} |= "foo"[5m]) / rate({app="foo"} |= "foo"[5m]) `, QueryTypeMetric}, + {"filters", `{app="foo"} |= "foo" |= "f" != "b"`, QueryTypeFilter}, + {"filters and labels filters", `{app="foo"} |= "foo" |= "f" != "b" | json | a > 5`, QueryTypeFilter}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := QueryType(tt.query) - if (err != nil) != tt.wantErr { - t.Errorf("QueryType() error = %v, wantErr %v", err, tt.wantErr) - return - } + got, err := QueryType(syntax.MustParseExpr(tt.query)) + require.NoError(t, err) if got != tt.want { t.Errorf("QueryType() = %v, want %v", got, tt.want) } @@ -69,12 +65,13 @@ func TestLogSlowQuery(t *testing.T) { ctx = context.WithValue(ctx, httpreq.QueryTagsHTTPHeader, "Source=logvolhist,Feature=Beta") RecordRangeAndInstantQueryMetrics(ctx, util_log.Logger, LiteralParams{ - qs: `{foo="bar"} |= "buzz"`, - direction: logproto.BACKWARD, - end: now, - start: now.Add(-1 * time.Hour), - limit: 1000, - step: time.Minute, + queryString: `{foo="bar"} |= "buzz"`, + direction: logproto.BACKWARD, + end: now, + start: now.Add(-1 * time.Hour), + limit: 1000, + step: time.Minute, + queryExpr: syntax.MustParseExpr(`{foo="bar"} |= "buzz"`), }, "200", stats.Result{ Summary: stats.Summary{ BytesProcessedPerSecond: 100000, diff --git a/pkg/logql/rangemapper.go 
b/pkg/logql/rangemapper.go index cc63944bc07e9..250f586603b7e 100644 --- a/pkg/logql/rangemapper.go +++ b/pkg/logql/rangemapper.go @@ -81,10 +81,10 @@ func NewRangeMapperMetrics(registerer prometheus.Registerer) *MapperMetrics { // be executed by the downstream engine. // It returns a boolean indicating whether a rewrite was possible, the // rewritten sample expression, and an error in case the rewrite failed. -func (m RangeMapper) Parse(query string) (bool, syntax.Expr, error) { - origExpr, err := syntax.ParseSampleExpr(query) - if err != nil { - return true, nil, err +func (m RangeMapper) Parse(expr syntax.Expr) (bool, syntax.Expr, error) { + origExpr, ok := expr.(syntax.SampleExpr) + if !ok { + return true, nil, errors.New("only sample expression supported") } recorder := m.metrics.downstreamRecorder() diff --git a/pkg/logql/rangemapper_test.go b/pkg/logql/rangemapper_test.go index 1c2f827867f93..48394d219be1a 100644 --- a/pkg/logql/rangemapper_test.go +++ b/pkg/logql/rangemapper_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/logql/syntax" ) func Test_SplitRangeInterval(t *testing.T) { @@ -83,7 +83,7 @@ func Test_SplitRangeInterval(t *testing.T) { rvm, err := NewRangeMapper(2*time.Second, nilShardMetrics, mapperStats) require.NoError(t, err) - noop, mappedExpr, err := rvm.Parse(tc.expr) + noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr)) require.NoError(t, err) require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String())) @@ -1741,7 +1741,7 @@ func Test_SplitRangeVectorMapping(t *testing.T) { rvm, err := NewRangeMapper(time.Minute, nilShardMetrics, mapperStats) require.NoError(t, err) - noop, mappedExpr, err := rvm.Parse(tc.expr) + noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr)) require.NoError(t, err) require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String())) @@ -1932,7 +1932,7 
@@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) { rvm, err := NewRangeMapper(time.Minute, nilShardMetrics, mapperStats) require.NoError(t, err) - noop, mappedExpr, err := rvm.Parse(tc.expr) + noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr)) require.NoError(t, err) require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String())) @@ -1945,21 +1945,9 @@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) { func Test_FailQuery(t *testing.T) { rvm, err := NewRangeMapper(2*time.Minute, nilShardMetrics, NewMapperStats()) require.NoError(t, err) - _, _, err = rvm.Parse(`{app="foo"} |= "err"`) + _, _, err = rvm.Parse(syntax.MustParseExpr(`{app="foo"} |= "err"`)) require.Error(t, err) - _, _, err = rvm.Parse(`topk(0, sum(count_over_time({app="foo"} | json | __error__="" [15m])))`) - require.Error(t, err) - // Check fixes for bug where missing or empty parameters for regexp and pattern parsers threw a panic - // Missing parameter to regexp parser - _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | regexp [1d])))`) - require.ErrorIs(t, err, logqlmodel.ErrParse) - // Empty parameter to regexp parser - _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | regexp ` + "``" + ` [1d])))`) - require.ErrorIs(t, err, logqlmodel.ErrParse) - // Empty parameter to pattern parser - _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | pattern ` + `""` + ` [1d])))`) - require.ErrorIs(t, err, logqlmodel.ErrParse) // Empty parameter to json parser - _, _, err = rvm.Parse(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | json [1d])))`) + _, _, err = 
rvm.Parse(syntax.MustParseExpr(`topk(10,sum by(namespace)(count_over_time({application="nginx", site!="eu-west-1-dev"} |= "/artifactory/" != "api" != "binarystore" | json [1d])))`)) require.NoError(t, err) } diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go index 6409cdbf0860c..f1ee7e4ba6985 100644 --- a/pkg/logql/shardmapper.go +++ b/pkg/logql/shardmapper.go @@ -41,12 +41,7 @@ func NewShardMapperMetrics(registerer prometheus.Registerer) *MapperMetrics { return newMapperMetrics(registerer, "shard") } -func (m ShardMapper) Parse(query string) (noop bool, bytesPerShard uint64, expr syntax.Expr, err error) { - parsed, err := syntax.ParseExpr(query) - if err != nil { - return false, 0, nil, err - } - +func (m ShardMapper) Parse(parsed syntax.Expr) (noop bool, bytesPerShard uint64, expr syntax.Expr, err error) { recorder := m.metrics.downstreamRecorder() mapped, bytesPerShard, err := m.Map(parsed, recorder) diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go index bdfd8a6c42d41..80b2e68751fee 100644 --- a/pkg/logql/shardmapper_test.go +++ b/pkg/logql/shardmapper_test.go @@ -1361,14 +1361,14 @@ func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher { func TestStringTrimming(t *testing.T) { for _, tc := range []struct { - expr string + expr syntax.Expr expected string shards int }{ { // sample expr in entirety for low shard count shards: 2, - expr: `count_over_time({app="foo"}[1m])`, + expr: syntax.MustParseExpr(`count_over_time({app="foo"}[1m])`), expected: ` downstream ++ downstream @@ -1377,7 +1377,7 @@ func TestStringTrimming(t *testing.T) { { // sample expr doesnt display infinite shards shards: 5, - expr: `count_over_time({app="foo"}[1m])`, + expr: syntax.MustParseExpr(`count_over_time({app="foo"}[1m])`), expected: ` downstream ++ downstream ++ @@ -1389,7 +1389,7 @@ func TestStringTrimming(t *testing.T) { { // log selector expr in entirety for low shard count shards: 2, - expr: `{app="foo"}`, + expr: 
syntax.MustParseExpr(`{app="foo"}`), expected: ` downstream<{app="foo"},shard=0_of_2> ++ downstream<{app="foo"},shard=1_of_2> @@ -1398,7 +1398,7 @@ func TestStringTrimming(t *testing.T) { { // log selector expr doesnt display infinite shards shards: 5, - expr: `{app="foo"}`, + expr: syntax.MustParseExpr(`{app="foo"}`), expected: ` downstream<{app="foo"},shard=0_of_5> ++ downstream<{app="foo"},shard=1_of_5> ++ @@ -1408,7 +1408,7 @@ func TestStringTrimming(t *testing.T) { `, }, } { - t.Run(tc.expr, func(t *testing.T) { + t.Run(tc.expr.String(), func(t *testing.T) { m := NewShardMapper(ConstantShards(tc.shards), nilShardMetrics) _, _, mappedExpr, err := m.Parse(tc.expr) require.Nil(t, err) diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go index e1fe5971ff3a2..81874ba6d6c41 100644 --- a/pkg/logql/syntax/parser.go +++ b/pkg/logql/syntax/parser.go @@ -99,6 +99,14 @@ func ParseExprWithoutValidation(input string) (expr Expr, err error) { return p.Parse() } +func MustParseExpr(input string) Expr { + expr, err := ParseExpr(input) + if err != nil { + panic(err) + } + return expr +} + func validateExpr(expr Expr) error { switch e := expr.(type) { case SampleExpr: diff --git a/pkg/logql/test_utils.go b/pkg/logql/test_utils.go index 982fa7f5f16d0..b979dedb42327 100644 --- a/pkg/logql/test_utils.go +++ b/pkg/logql/test_utils.go @@ -218,17 +218,7 @@ func (m MockDownstreamer) Downstreamer(_ context.Context) Downstreamer { return func (m MockDownstreamer) Downstream(ctx context.Context, queries []DownstreamQuery) ([]logqlmodel.Result, error) { results := make([]logqlmodel.Result, 0, len(queries)) for _, query := range queries { - params := NewLiteralParams( - query.Expr.String(), - query.Params.Start(), - query.Params.End(), - query.Params.Step(), - query.Params.Interval(), - query.Params.Direction(), - query.Params.Limit(), - query.Shards.Encode(), - ) - res, err := m.Query(params).Exec(ctx) + res, err := m.Query(query.Params).Exec(ctx) if err != nil { return 
nil, err } diff --git a/pkg/querier/plan/plan.go b/pkg/querier/plan/plan.go new file mode 100644 index 0000000000000..6822932d7b241 --- /dev/null +++ b/pkg/querier/plan/plan.go @@ -0,0 +1,101 @@ +package plan + +import ( + "bytes" + + "github.com/grafana/loki/pkg/logql/syntax" +) + +type QueryPlan struct { + AST syntax.Expr +} + +func (t QueryPlan) Marshal() ([]byte, error) { + return t.MarshalJSON() +} + +func (t *QueryPlan) MarshalTo(data []byte) (int, error) { + appender := &appendWriter{ + slice: data[:0], + } + err := syntax.EncodeJSON(t.AST, appender) + if err != nil { + return 0, err + } + + return len(appender.slice), nil +} + +func (t *QueryPlan) Unmarshal(data []byte) error { + return t.UnmarshalJSON(data) +} + +func (t *QueryPlan) Size() int { + counter := &countWriter{} + err := syntax.EncodeJSON(t.AST, counter) + if err != nil { + return 0 + } + + return counter.bytes +} + +func (t QueryPlan) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := syntax.EncodeJSON(t.AST, &buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (t *QueryPlan) UnmarshalJSON(data []byte) error { + // An empty query plan is ingored to be backwards compatible. + if len(data) == 0 { + return nil + } + + expr, err := syntax.DecodeJSON(string(data)) + if err != nil { + return err + } + + t.AST = expr + return nil +} + +func (t QueryPlan) Equal(other QueryPlan) bool { + left, err := t.Marshal() + if err != nil { + return false + } + + right, err := other.Marshal() + if err != nil { + return false + } + return bytes.Equal(left, right) +} + +// countWriter is not writing any bytes. It just counts the bytes that would be +// written. +type countWriter struct { + bytes int +} + +// Write implements io.Writer. +func (w *countWriter) Write(p []byte) (int, error) { + w.bytes += len(p) + return len(p), nil +} + +// appendWriter appends to a slice. 
+type appendWriter struct { + slice []byte +} + +func (w *appendWriter) Write(p []byte) (int, error) { + w.slice = append(w.slice, p...) + return len(p), nil +} diff --git a/pkg/querier/plan/plan_test.go b/pkg/querier/plan/plan_test.go new file mode 100644 index 0000000000000..60f7d3fad1806 --- /dev/null +++ b/pkg/querier/plan/plan_test.go @@ -0,0 +1,26 @@ +package plan + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logql/syntax" +) + +func TestMarshalTo(t *testing.T) { + plan := QueryPlan{ + AST: syntax.MustParseExpr(`sum by (foo) (bytes_over_time({app="loki"} [1m]))`), + } + + data := make([]byte, plan.Size()) + _, err := plan.MarshalTo(data) + require.NoError(t, err) + + var buf bytes.Buffer + err = syntax.EncodeJSON(plan.AST, &buf) + require.NoError(t, err) + + require.JSONEq(t, buf.String(), string(data)) +} diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 2167a5134b602..b0c56a7439195 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -29,6 +29,7 @@ import ( "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" indexStats "github.com/grafana/loki/pkg/storage/stores/index/stats" "github.com/grafana/loki/pkg/util" @@ -259,6 +260,11 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + parsed, err := syntax.ParseExpr(rangeQuery.Query) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + return &LokiRequest{ Query: rangeQuery.Query, Limit: rangeQuery.Limit, @@ -269,12 +275,21 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer Interval: rangeQuery.Interval.Milliseconds(), Path: 
r.URL.Path, Shards: rangeQuery.Shards, + Plan: &plan.QueryPlan{ + AST: parsed, + }, }, nil case InstantQueryOp: req, err := loghttp.ParseInstantQuery(r) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + + parsed, err := syntax.ParseExpr(req.Query) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + return &LokiInstantRequest{ Query: req.Query, Limit: req.Limit, @@ -282,6 +297,9 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer TimeTs: req.Ts.UTC(), Path: r.URL.Path, Shards: req.Shards, + Plan: &plan.QueryPlan{ + AST: parsed, + }, }, nil case SeriesOp: req, err := loghttp.ParseAndValidateSeriesQuery(r) @@ -409,6 +427,12 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) if err != nil { return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + + parsed, err := syntax.ParseExpr(req.Query) + if err != nil { + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + return &LokiRequest{ Query: req.Query, Limit: req.Limit, @@ -419,12 +443,21 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) Interval: req.Interval.Milliseconds(), Path: r.Url, Shards: req.Shards, + Plan: &plan.QueryPlan{ + AST: parsed, + }, }, ctx, nil case InstantQueryOp: req, err := loghttp.ParseInstantQuery(httpReq) if err != nil { return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + + parsed, err := syntax.ParseExpr(req.Query) + if err != nil { + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + return &LokiInstantRequest{ Query: req.Query, Limit: req.Limit, @@ -432,6 +465,9 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) TimeTs: req.Ts.UTC(), Path: r.Url, Shards: req.Shards, + Plan: &plan.QueryPlan{ + AST: parsed, + }, }, ctx, nil case SeriesOp: req, err := loghttp.ParseAndValidateSeriesQuery(httpReq) @@ -1429,10 
+1465,14 @@ type paramsRangeWrapper struct { *LokiRequest } -func (p paramsRangeWrapper) Query() string { +func (p paramsRangeWrapper) QueryString() string { return p.GetQuery() } +func (p paramsRangeWrapper) GetExpression() syntax.Expr { + return p.LokiRequest.Plan.AST +} + func (p paramsRangeWrapper) Start() time.Time { return p.GetStartTs() } @@ -1459,10 +1499,14 @@ type paramsInstantWrapper struct { *LokiInstantRequest } -func (p paramsInstantWrapper) Query() string { +func (p paramsInstantWrapper) QueryString() string { return p.GetQuery() } +func (p paramsInstantWrapper) GetExpression() syntax.Expr { + return p.LokiInstantRequest.Plan.AST +} + func (p paramsInstantWrapper) Start() time.Time { return p.LokiInstantRequest.GetTimeTs() } @@ -1487,10 +1531,14 @@ type paramsSeriesWrapper struct { *LokiSeriesRequest } -func (p paramsSeriesWrapper) Query() string { +func (p paramsSeriesWrapper) QueryString() string { return p.GetQuery() } +func (p paramsSeriesWrapper) GetExpression() syntax.Expr { + return nil +} + func (p paramsSeriesWrapper) Start() time.Time { return p.LokiSeriesRequest.GetStartTs() } @@ -1515,10 +1563,14 @@ type paramsLabelWrapper struct { *LabelRequest } -func (p paramsLabelWrapper) Query() string { +func (p paramsLabelWrapper) QueryString() string { return p.GetQuery() } +func (p paramsLabelWrapper) GetExpression() syntax.Expr { + return nil +} + func (p paramsLabelWrapper) Start() time.Time { return p.LabelRequest.GetStartTs() } @@ -1543,10 +1595,14 @@ type paramsStatsWrapper struct { *logproto.IndexStatsRequest } -func (p paramsStatsWrapper) Query() string { +func (p paramsStatsWrapper) QueryString() string { return p.GetQuery() } +func (p paramsStatsWrapper) GetExpression() syntax.Expr { + return nil +} + func (p paramsStatsWrapper) Start() time.Time { return p.From.Time() } diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index 77df78aca6958..c722b12af78d8 100644 --- 
a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -25,8 +25,10 @@ import ( "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/httpreq" @@ -63,6 +65,9 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { Path: "/query_range", StartTs: start, EndTs: end, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"}`), + }, }, false}, {"query_range", func() (*http.Request, error) { return http.NewRequest(http.MethodGet, @@ -76,6 +81,9 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { Path: "/query_range", StartTs: start, EndTs: end, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"}`), + }, }, false}, {"legacy query_range with refexp", func() (*http.Request, error) { return http.NewRequest(http.MethodGet, @@ -89,6 +97,9 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { Path: "/api/prom/query", StartTs: start, EndTs: end, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"} |~ "foo"`), + }, }, false}, {"series", func() (*http.Request, error) { return http.NewRequest(http.MethodGet, @@ -559,7 +570,13 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) { } codec := RequestProtobufCodec{} for i, queryTest := range queryTests { - u := &url.URL{Path: "/loki/api/v1/query_range"} + params := url.Values{ + "query": []string{`{app="foo"}`}, + } + u := &url.URL{ + Path: "/loki/api/v1/query_range", + RawQuery: params.Encode(), + } httpReq := &http.Request{ Method: "GET", RequestURI: u.String(), diff --git a/pkg/querier/queryrange/downstreamer.go b/pkg/querier/queryrange/downstreamer.go index b7a3d2f57a3ff..860aa980fb30b 100644 
--- a/pkg/querier/queryrange/downstreamer.go +++ b/pkg/querier/queryrange/downstreamer.go @@ -20,6 +20,7 @@ import ( "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/metadata" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" "github.com/grafana/loki/pkg/util/spanlogger" @@ -34,19 +35,22 @@ type DownstreamHandler struct { next queryrangebase.Handler } -func ParamsToLokiRequest(params logql.Params, shards logql.Shards) queryrangebase.Request { +func ParamsToLokiRequest(params logql.Params) queryrangebase.Request { if logql.GetRangeType(params) == logql.InstantType { return &LokiInstantRequest{ - Query: params.Query(), + Query: params.QueryString(), Limit: params.Limit(), TimeTs: params.Start(), Direction: params.Direction(), Path: "/loki/api/v1/query", // TODO(owen-d): make this derivable - Shards: shards.Encode(), + Shards: params.Shards(), + Plan: &plan.QueryPlan{ + AST: params.GetExpression(), + }, } } return &LokiRequest{ - Query: params.Query(), + Query: params.QueryString(), Limit: params.Limit(), Step: params.Step().Milliseconds(), Interval: params.Interval().Milliseconds(), @@ -54,7 +58,10 @@ func ParamsToLokiRequest(params logql.Params, shards logql.Shards) queryrangebas EndTs: params.End(), Direction: params.Direction(), Path: "/loki/api/v1/query_range", // TODO(owen-d): make this derivable - Shards: shards.Encode(), + Shards: params.Shards(), + Plan: &plan.QueryPlan{ + AST: params.GetExpression(), + }, } } @@ -97,12 +104,12 @@ type instance struct { func (in instance) Downstream(ctx context.Context, queries []logql.DownstreamQuery) ([]logqlmodel.Result, error) { return in.For(ctx, queries, func(qry logql.DownstreamQuery) (logqlmodel.Result, error) { - req := ParamsToLokiRequest(qry.Params, qry.Shards).WithQuery(qry.Expr.String()) + req := 
ParamsToLokiRequest(qry.Params).WithQuery(qry.Params.GetExpression().String()) sp, ctx := opentracing.StartSpanFromContext(ctx, "DownstreamHandler.instance") defer sp.Finish() logger := spanlogger.FromContext(ctx) defer logger.Finish() - level.Debug(logger).Log("shards", fmt.Sprintf("%+v", qry.Shards), "query", req.GetQuery(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler)) + level.Debug(logger).Log("shards", fmt.Sprintf("%+v", qry.Params.Shards()), "query", req.GetQuery(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler)) res, err := in.handler.Do(ctx, req) if err != nil { diff --git a/pkg/querier/queryrange/downstreamer_test.go b/pkg/querier/queryrange/downstreamer_test.go index 552c0c53aa056..e453f03d9a3ee 100644 --- a/pkg/querier/queryrange/downstreamer_test.go +++ b/pkg/querier/queryrange/downstreamer_test.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "strconv" + "strings" "sync" "testing" "time" @@ -223,8 +225,8 @@ func TestInstanceFor(t *testing.T) { } in := mkIn() newParams := func() logql.Params { - return logql.NewLiteralParams( - "", + params, err := logql.NewLiteralParams( + `{app="foo"}`, time.Now(), time.Now(), 0, @@ -233,6 +235,8 @@ func TestInstanceFor(t *testing.T) { 1000, nil, ) + require.NoError(t, err) + return params } var queries []logql.DownstreamQuery @@ -280,22 +284,32 @@ func TestInstanceFor(t *testing.T) { context.TODO(), []logql.DownstreamQuery{ { - Params: newParams(), - Shards: logql.Shards{ - {Shard: 0, Of: 2}, + Params: logql.ParamsWithShardsOverride{ + Params: newParams(), + ShardsOverride: logql.Shards{ + {Shard: 0, Of: 2}, + }.Encode(), }, }, { - Params: newParams(), - Shards: logql.Shards{ - {Shard: 1, Of: 2}, + Params: logql.ParamsWithShardsOverride{ + Params: newParams(), + ShardsOverride: logql.Shards{ + {Shard: 1, Of: 2}, + }.Encode(), }, }, }, func(qry logql.DownstreamQuery) (logqlmodel.Result, error) { + // Decode shard + s := strings.Split(qry.Params.Shards()[0], "_") + shard, err := 
strconv.Atoi(s[0]) + if err != nil { + return logqlmodel.Result{}, err + } return logqlmodel.Result{ Data: promql.Scalar{ - V: float64(qry.Shards[0].Shard), + V: float64(shard), }, }, nil }, @@ -309,8 +323,8 @@ func TestInstanceFor(t *testing.T) { } func TestInstanceDownstream(t *testing.T) { - params := logql.NewLiteralParams( - "", + params, err := logql.NewLiteralParams( + `{foo="bar"}`, time.Now(), time.Now(), 0, @@ -319,8 +333,9 @@ func TestInstanceDownstream(t *testing.T) { 1000, nil, ) + require.NoError(t, err) expr, err := syntax.ParseExpr(`{foo="bar"}`) - require.Nil(t, err) + require.NoError(t, err) expectedResp := func() *LokiResponse { return &LokiResponse{ @@ -340,9 +355,10 @@ func TestInstanceDownstream(t *testing.T) { queries := []logql.DownstreamQuery{ { - Expr: expr, - Params: params, - Shards: logql.Shards{{Shard: 0, Of: 2}}, + Params: logql.ParamsWithShardsOverride{ + Params: logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: expr}, + ShardsOverride: logql.Shards{{Shard: 0, Of: 2}}.Encode(), + }, }, } @@ -353,7 +369,7 @@ func TestInstanceDownstream(t *testing.T) { // for some reason these seemingly can't be checked in their own goroutines, // so we assign them to scoped variables for later comparison. got = req - want = ParamsToLokiRequest(params, queries[0].Shards).WithQuery(expr.String()) + want = ParamsToLokiRequest(queries[0].Params).WithQuery(expr.String()) return expectedResp(), nil }, @@ -484,9 +500,10 @@ func TestDownstreamAccumulatorSimple(t *testing.T) { x = append(x, *s) } // dummy params. 
Only need to populate direction & limit - params := logql.NewLiteralParams( - "", time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil, + params, err := logql.NewLiteralParams( + `{app="foo"}`, time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil, ) + require.NoError(t, err) acc := newDownstreamAccumulator(params, 1) result := logqlmodel.Result{ @@ -542,9 +559,10 @@ func TestDownstreamAccumulatorMultiMerge(t *testing.T) { } // dummy params. Only need to populate direction & limit - params := logql.NewLiteralParams( - "", time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil, + params, err := logql.NewLiteralParams( + `{app="foo"}`, time.Time{}, time.Time{}, 0, 0, direction, uint32(lim), nil, ) + require.NoError(t, err) acc := newDownstreamAccumulator(params, 1) for i := 0; i < nQueries; i++ { diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go index 8b61facf39e4e..0e72b15140e38 100644 --- a/pkg/querier/queryrange/marshal.go +++ b/pkg/querier/queryrange/marshal.go @@ -6,10 +6,12 @@ import ( "context" "fmt" "io" + "net/http" "time" "github.com/gogo/googleapis/google/rpc" "github.com/gogo/status" + "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/user" "github.com/opentracing/opentracing-go" "github.com/prometheus/prometheus/promql" @@ -19,7 +21,9 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/sketch" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/util/httpreq" "github.com/grafana/loki/pkg/util/querylimits" @@ -277,12 +281,32 @@ func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryra case *QueryRequest_Series: return concrete.Series, ctx, nil case *QueryRequest_Instant: + if concrete.Instant.Plan == nil { + parsed, err := 
syntax.ParseExpr(concrete.Instant.GetQuery()) + if err != nil { + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + concrete.Instant.Plan = &plan.QueryPlan{ + AST: parsed, + } + } + return concrete.Instant, ctx, nil case *QueryRequest_Stats: return concrete.Stats, ctx, nil case *QueryRequest_Volume: return concrete.Volume, ctx, nil case *QueryRequest_Streams: + if concrete.Streams.Plan == nil { + parsed, err := syntax.ParseExpr(concrete.Streams.GetQuery()) + if err != nil { + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + concrete.Streams.Plan = &plan.QueryPlan{ + AST: parsed, + } + } + return concrete.Streams, ctx, nil case *QueryRequest_Labels: return &LabelRequest{ diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index f3955da153bf9..c2cce1dc514fd 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -16,6 +16,7 @@ import ( stats "github.com/grafana/loki/pkg/logqlmodel/stats" _ "github.com/grafana/loki/pkg/push" github_com_grafana_loki_pkg_push "github.com/grafana/loki/pkg/push" + github_com_grafana_loki_pkg_querier_plan "github.com/grafana/loki/pkg/querier/plan" queryrangebase "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" _ "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" github_com_grafana_loki_pkg_querier_queryrange_queryrangebase_definitions "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" @@ -40,15 +41,16 @@ var _ = time.Kitchen const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type LokiRequest struct { - Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - Step int64 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"` - Interval int64 `protobuf:"varint,9,opt,name=interval,proto3" 
json:"interval,omitempty"` - StartTs time.Time `protobuf:"bytes,4,opt,name=startTs,proto3,stdtime" json:"startTs"` - EndTs time.Time `protobuf:"bytes,5,opt,name=endTs,proto3,stdtime" json:"endTs"` - Direction logproto.Direction `protobuf:"varint,6,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` - Path string `protobuf:"bytes,7,opt,name=path,proto3" json:"path,omitempty"` - Shards []string `protobuf:"bytes,8,rep,name=shards,proto3" json:"shards"` + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + Step int64 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"` + Interval int64 `protobuf:"varint,9,opt,name=interval,proto3" json:"interval,omitempty"` + StartTs time.Time `protobuf:"bytes,4,opt,name=startTs,proto3,stdtime" json:"startTs"` + EndTs time.Time `protobuf:"bytes,5,opt,name=endTs,proto3,stdtime" json:"endTs"` + Direction logproto.Direction `protobuf:"varint,6,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` + Path string `protobuf:"bytes,7,opt,name=path,proto3" json:"path,omitempty"` + Shards []string `protobuf:"bytes,8,rep,name=shards,proto3" json:"shards"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,10,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *LokiRequest) Reset() { *m = LokiRequest{} } @@ -147,12 +149,13 @@ func (m *LokiRequest) GetShards() []string { } type LokiInstantRequest struct { - Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - TimeTs time.Time `protobuf:"bytes,3,opt,name=timeTs,proto3,stdtime" json:"timeTs"` - Direction logproto.Direction `protobuf:"varint,4,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` - Path 
string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` - Shards []string `protobuf:"bytes,6,rep,name=shards,proto3" json:"shards"` + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + TimeTs time.Time `protobuf:"bytes,3,opt,name=timeTs,proto3,stdtime" json:"timeTs"` + Direction logproto.Direction `protobuf:"varint,4,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + Shards []string `protobuf:"bytes,6,rep,name=shards,proto3" json:"shards"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,7,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *LokiInstantRequest) Reset() { *m = LokiInstantRequest{} } @@ -1123,99 +1126,101 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 1458 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x1b, 0xf7, 0xfa, 0x33, 0x9e, 0x34, 0x79, 0xfb, 0x4e, 0xa2, 0x74, 0xdf, 0xb4, 0xef, 0xae, 0x65, - 0x89, 0xd6, 0x20, 0x58, 0x53, 0xa7, 0xf4, 0x13, 0x10, 0x5d, 0xda, 0xca, 0x15, 0x2d, 0x6a, 0x37, - 0x11, 0x07, 0x6e, 0x13, 0x7b, 0x62, 0x2f, 0xf6, 0x7a, 0x37, 0x3b, 0xe3, 0xa8, 0xb9, 0xf1, 0x07, - 0x80, 0xd4, 0xbf, 0x02, 0x21, 0x51, 0x21, 0x71, 0xe5, 0xc8, 0xa9, 0xc7, 0x1c, 0xab, 0x48, 0x18, - 0xea, 0x72, 0x80, 0x9c, 0xfa, 0x27, 0xa0, 0xf9, 0xd8, 0xf5, 0xac, 0xed, 0xb4, 0x76, 0xb9, 0xb4, - 0x12, 0x97, 0x64, 0x3e, 0x9e, 0xdf, 0xec, 0x3c, 0xbf, 0xe7, 0xf7, 0x3c, 0x33, 0x63, 0x70, 0x2e, - 0xe8, 0xb4, 0xaa, 0xbb, 0x7d, 0x1c, 0xba, 0x38, 0xe4, 0xff, 0xf7, 0x43, 0xd4, 0x6b, 0x61, 0xa5, - 0x69, 0x05, 0xa1, 0x4f, 0x7d, 0x08, 0x46, 0x23, 0xeb, 0xb5, 0x96, 0x4b, 0xdb, 0xfd, 0x6d, 0xab, - 0xe1, 0x7b, 0xd5, 0x96, 0xdf, 0xf2, 0xab, 
0x2d, 0xdf, 0x6f, 0x75, 0x31, 0x0a, 0x5c, 0x22, 0x9b, - 0xd5, 0x30, 0x68, 0x54, 0x09, 0x45, 0xb4, 0x4f, 0x04, 0x7e, 0x7d, 0x95, 0x19, 0xf2, 0x26, 0x87, - 0xc8, 0x51, 0x53, 0x9a, 0xf3, 0xde, 0x76, 0x7f, 0xa7, 0x4a, 0x5d, 0x0f, 0x13, 0x8a, 0xbc, 0x40, - 0x1a, 0x9c, 0x66, 0xfb, 0xeb, 0xfa, 0x2d, 0x81, 0x8c, 0x1a, 0x72, 0xf2, 0x7f, 0x89, 0x49, 0xd2, - 0xc1, 0xb4, 0xd1, 0x96, 0x53, 0x25, 0x39, 0xb5, 0xdb, 0xf5, 0xfc, 0x26, 0xee, 0xf2, 0xbd, 0x10, - 0xf1, 0x57, 0x5a, 0xac, 0x30, 0x8b, 0xa0, 0x4f, 0xda, 0xfc, 0x8f, 0x1c, 0xfc, 0xf4, 0xa5, 0x74, - 0x6c, 0x23, 0x82, 0xab, 0x4d, 0xbc, 0xe3, 0xf6, 0x5c, 0xea, 0xfa, 0x3d, 0xa2, 0xb6, 0xe5, 0x22, - 0x17, 0x67, 0x5b, 0x64, 0x9c, 0xe2, 0xf2, 0x41, 0x1a, 0x2c, 0xde, 0xf1, 0x3b, 0xae, 0x83, 0x77, - 0xfb, 0x98, 0x50, 0xb8, 0x0a, 0x72, 0xdc, 0x46, 0xd7, 0x4a, 0x5a, 0xa5, 0xe8, 0x88, 0x0e, 0x1b, - 0xed, 0xba, 0x9e, 0x4b, 0xf5, 0x74, 0x49, 0xab, 0x2c, 0x39, 0xa2, 0x03, 0x21, 0xc8, 0x12, 0x8a, - 0x03, 0x3d, 0x53, 0xd2, 0x2a, 0x19, 0x87, 0xb7, 0xe1, 0x3a, 0x58, 0x70, 0x7b, 0x14, 0x87, 0x7b, - 0xa8, 0xab, 0x17, 0xf9, 0x78, 0xdc, 0x87, 0x1f, 0x83, 0x02, 0xa1, 0x28, 0xa4, 0x5b, 0x44, 0xcf, - 0x96, 0xb4, 0xca, 0x62, 0x6d, 0xdd, 0x12, 0xa1, 0xb0, 0xa2, 0x50, 0x58, 0x5b, 0x51, 0x28, 0xec, - 0x85, 0xc7, 0x03, 0x33, 0xf5, 0xf0, 0x37, 0x53, 0x73, 0x22, 0x10, 0xbc, 0x0a, 0x72, 0xb8, 0xd7, - 0xdc, 0x22, 0x7a, 0x6e, 0x0e, 0xb4, 0x80, 0xc0, 0xf3, 0xa0, 0xd8, 0x74, 0x43, 0xdc, 0x60, 0x9c, - 0xe9, 0xf9, 0x92, 0x56, 0x59, 0xae, 0xad, 0x58, 0x71, 0x68, 0x6f, 0x44, 0x53, 0xce, 0xc8, 0x8a, - 0xb9, 0x17, 0x20, 0xda, 0xd6, 0x0b, 0x9c, 0x09, 0xde, 0x86, 0x65, 0x90, 0x27, 0x6d, 0x14, 0x36, - 0x89, 0xbe, 0x50, 0xca, 0x54, 0x8a, 0x36, 0x38, 0x1a, 0x98, 0x72, 0xc4, 0x91, 0xff, 0xcb, 0x7f, - 0x69, 0x00, 0x32, 0x4a, 0x6f, 0xf7, 0x08, 0x45, 0x3d, 0xfa, 0x2a, 0xcc, 0x7e, 0x08, 0xf2, 0x4c, - 0x94, 0x5b, 0x84, 0x73, 0x3b, 0xab, 0xab, 0x12, 0x93, 0xf4, 0x35, 0x3b, 0x97, 0xaf, 0xb9, 0xa9, - 0xbe, 0xe6, 0x8f, 0xf5, 0xf5, 0x87, 0x2c, 0x38, 0x21, 0xe4, 0x43, 0x02, 0xbf, 
0x47, 0x30, 0x03, - 0x6d, 0xf2, 0x14, 0x14, 0x6e, 0x4a, 0x10, 0x1f, 0x71, 0xe4, 0x0c, 0xfc, 0x04, 0x64, 0x6f, 0x20, - 0x8a, 0xb8, 0xcb, 0x8b, 0xb5, 0x55, 0x4b, 0x11, 0x25, 0x5b, 0x8b, 0xcd, 0xd9, 0x6b, 0xcc, 0xab, - 0xa3, 0x81, 0xb9, 0xdc, 0x44, 0x14, 0xbd, 0xeb, 0x7b, 0x2e, 0xc5, 0x5e, 0x40, 0xf7, 0x1d, 0x8e, - 0x84, 0x1f, 0x80, 0xe2, 0xcd, 0x30, 0xf4, 0xc3, 0xad, 0xfd, 0x00, 0x73, 0x8a, 0x8a, 0xf6, 0xa9, - 0xa3, 0x81, 0xb9, 0x82, 0xa3, 0x41, 0x05, 0x31, 0xb2, 0x84, 0x6f, 0x83, 0x1c, 0xef, 0x70, 0x52, - 0x8a, 0xf6, 0xca, 0xd1, 0xc0, 0xfc, 0x0f, 0x87, 0x28, 0xe6, 0xc2, 0x22, 0xc9, 0x61, 0x6e, 0x26, - 0x0e, 0xe3, 0x50, 0xe6, 0xd5, 0x50, 0xea, 0xa0, 0xb0, 0x87, 0x43, 0xc2, 0x96, 0x29, 0xf0, 0xf1, - 0xa8, 0x0b, 0xaf, 0x03, 0xc0, 0x88, 0x71, 0x09, 0x75, 0x1b, 0x4c, 0x4f, 0x8c, 0x8c, 0x25, 0x4b, - 0x94, 0x0b, 0x07, 0x93, 0x7e, 0x97, 0xda, 0x50, 0xb2, 0xa0, 0x18, 0x3a, 0x4a, 0x1b, 0x3e, 0xd2, - 0x40, 0xa1, 0x8e, 0x51, 0x13, 0x87, 0x44, 0x2f, 0x96, 0x32, 0x95, 0xc5, 0xda, 0x5b, 0x96, 0x5a, - 0x1b, 0xee, 0x85, 0xbe, 0x87, 0x69, 0x1b, 0xf7, 0x49, 0x14, 0x20, 0x61, 0x6d, 0x77, 0x0e, 0x07, - 0xe6, 0xb6, 0x5a, 0x51, 0x43, 0xb4, 0x83, 0x7a, 0xa8, 0xda, 0xf5, 0x3b, 0x6e, 0x75, 0xee, 0x7a, - 0x74, 0xec, 0x77, 0x8e, 0x06, 0xa6, 0xf6, 0x9e, 0x13, 0x6d, 0xb1, 0xfc, 0xab, 0x06, 0xfe, 0xcb, - 0x22, 0xbc, 0xc9, 0xd6, 0x26, 0x4a, 0x62, 0x78, 0x88, 0x36, 0xda, 0xba, 0xc6, 0x64, 0xe6, 0x88, - 0x8e, 0x5a, 0x2c, 0xd2, 0xff, 0xa8, 0x58, 0x64, 0xe6, 0x2f, 0x16, 0x51, 0x36, 0x64, 0xa7, 0x66, - 0x43, 0xee, 0xd8, 0x6c, 0xf8, 0x26, 0x23, 0x32, 0x3f, 0xf2, 0x6f, 0x8e, 0x9c, 0xb8, 0x15, 0xe7, - 0x44, 0x86, 0xef, 0x36, 0x96, 0x9a, 0x58, 0xeb, 0x76, 0x13, 0xf7, 0xa8, 0xbb, 0xe3, 0xe2, 0xf0, - 0x25, 0x99, 0xa1, 0xc8, 0x2d, 0x93, 0x94, 0x9b, 0xaa, 0x95, 0xec, 0x6b, 0xaf, 0x95, 0xb1, 0xec, - 0xc8, 0xbd, 0x42, 0x76, 0x94, 0x9f, 0xa7, 0xc1, 0x1a, 0x0b, 0xc7, 0x1d, 0xb4, 0x8d, 0xbb, 0x9f, - 0x23, 0x6f, 0xce, 0x90, 0x9c, 0x55, 0x42, 0x52, 0xb4, 0xe1, 0xbf, 0x94, 0xcf, 0x40, 0xf9, 0x77, - 0x1a, 0x58, 0x88, 
0x6a, 0x38, 0xb4, 0x00, 0x10, 0x30, 0x5e, 0xa6, 0x05, 0xd1, 0xcb, 0x0c, 0x1c, - 0xc6, 0xa3, 0x8e, 0x62, 0x01, 0xbf, 0x02, 0x79, 0xd1, 0x93, 0x59, 0x70, 0x4a, 0xc9, 0x02, 0x1a, - 0x62, 0xe4, 0x5d, 0x6f, 0xa2, 0x80, 0xe2, 0xd0, 0xbe, 0xc2, 0x76, 0x71, 0x38, 0x30, 0xcf, 0xbd, - 0x88, 0x22, 0x7e, 0xc3, 0x12, 0x38, 0x16, 0x5c, 0xf1, 0x4d, 0x47, 0x7e, 0xa1, 0xfc, 0xad, 0x06, - 0x4e, 0xb2, 0x8d, 0x32, 0x6a, 0x62, 0x55, 0xdc, 0x00, 0x0b, 0xa1, 0x6c, 0xf3, 0xed, 0x2e, 0xd6, - 0xca, 0x56, 0x92, 0xd6, 0x29, 0x54, 0xda, 0xd9, 0xc7, 0x03, 0x53, 0x73, 0x62, 0x24, 0xdc, 0x48, - 0xd0, 0x98, 0x9e, 0x46, 0x23, 0x83, 0xa4, 0x12, 0xc4, 0xfd, 0x9c, 0x06, 0xf0, 0x76, 0xaf, 0x89, - 0x1f, 0x30, 0xf1, 0x8d, 0x74, 0xda, 0x9f, 0xd8, 0xd1, 0x99, 0x11, 0x29, 0x93, 0xf6, 0xf6, 0xb5, - 0xc3, 0x81, 0x79, 0xe9, 0x45, 0xac, 0xbc, 0x00, 0xac, 0xb8, 0xa0, 0x0a, 0x37, 0xfd, 0xfa, 0x9f, - 0x2b, 0x3f, 0xa6, 0xc1, 0xf2, 0x17, 0x7e, 0xb7, 0xef, 0xe1, 0x98, 0x38, 0x6f, 0x82, 0x38, 0x7d, - 0x44, 0x5c, 0xd2, 0xd6, 0xbe, 0x74, 0x38, 0x30, 0x37, 0x66, 0x22, 0x2d, 0x09, 0x7c, 0x73, 0x09, - 0x7b, 0x94, 0x06, 0xab, 0x5b, 0x7e, 0xf0, 0xd9, 0x26, 0x7f, 0xbe, 0x28, 0x75, 0x11, 0x4f, 0xd0, - 0xb6, 0x3a, 0xa2, 0x8d, 0x21, 0xee, 0x22, 0x1a, 0xba, 0x0f, 0xec, 0x8d, 0xc3, 0x81, 0x59, 0x9d, - 0x89, 0xb2, 0x11, 0xe8, 0xcd, 0xa5, 0xeb, 0x97, 0x34, 0x58, 0xbb, 0xdf, 0x47, 0x3d, 0xea, 0x76, - 0xb1, 0xa0, 0x2c, 0x26, 0x6c, 0x7f, 0x82, 0x30, 0x63, 0x44, 0x58, 0x12, 0x23, 0xa9, 0xfb, 0xe8, - 0x70, 0x60, 0x5e, 0x99, 0x89, 0xba, 0x69, 0xf0, 0x37, 0x97, 0xc4, 0x9f, 0xb2, 0x60, 0xe9, 0x3e, - 0x5b, 0x25, 0xe6, 0xee, 0x1d, 0x20, 0x8f, 0x5c, 0xc9, 0x1c, 0x8c, 0xee, 0x68, 0x61, 0xd0, 0xb0, - 0x36, 0xe5, 0x61, 0x2c, 0x2c, 0xe0, 0x65, 0x90, 0x27, 0xfc, 0x26, 0x24, 0x0b, 0xaa, 0x31, 0xfe, - 0x6a, 0x48, 0xde, 0xb9, 0xea, 0x29, 0x47, 0xda, 0xb3, 0xb7, 0x54, 0x97, 0x5d, 0x00, 0xa2, 0x9b, - 0x60, 0x79, 0x1c, 0x39, 0x79, 0x3d, 0x60, 0x68, 0x81, 0x81, 0x17, 0x41, 0x8e, 0x57, 0x6e, 0xf9, - 0x62, 0x4d, 0x7c, 0x76, 0xb2, 0x84, 0xd6, 0x53, 0x8e, 
0x30, 0x87, 0x35, 0x90, 0x0d, 0x42, 0xdf, - 0x93, 0xa7, 0xe8, 0x99, 0xf1, 0x6f, 0xaa, 0xc7, 0x4e, 0x3d, 0xe5, 0x70, 0x5b, 0x78, 0x81, 0x5d, - 0x79, 0xd9, 0x79, 0x45, 0xf8, 0x13, 0x82, 0x95, 0xac, 0x31, 0x98, 0x02, 0x89, 0x4c, 0xe1, 0x05, - 0x90, 0xdf, 0xe3, 0x65, 0x89, 0xbf, 0x2f, 0xd8, 0xdd, 0x51, 0x01, 0x25, 0x0b, 0x16, 0xf3, 0x4b, - 0xd8, 0xc2, 0x5b, 0xe0, 0x04, 0xf5, 0x83, 0x4e, 0x54, 0x00, 0xe4, 0xf3, 0xa3, 0xa4, 0x62, 0xa7, - 0x15, 0x88, 0x7a, 0xca, 0x49, 0xe0, 0xe0, 0x3d, 0x70, 0x72, 0x37, 0x21, 0x53, 0x4c, 0xf8, 0xbb, - 0x7f, 0x8c, 0xe7, 0xe9, 0xd9, 0x53, 0x4f, 0x39, 0x13, 0x68, 0x1b, 0x8c, 0x32, 0xaa, 0xfc, 0x47, - 0x06, 0x9c, 0x90, 0x9a, 0x11, 0x6f, 0x85, 0x4b, 0xb1, 0x0c, 0x84, 0x64, 0xfe, 0x7f, 0x9c, 0x0c, - 0xb8, 0xb9, 0xa2, 0x82, 0xf7, 0x63, 0x15, 0x08, 0xfd, 0xac, 0x8d, 0xb2, 0x94, 0xc7, 0x5f, 0x41, - 0xc8, 0xc8, 0x6f, 0x44, 0x91, 0x17, 0xb2, 0x39, 0x3d, 0xfd, 0xdc, 0x8d, 0x50, 0x32, 0xec, 0x57, - 0x41, 0xc1, 0x15, 0xcf, 0xfe, 0x69, 0x82, 0x99, 0xfc, 0x55, 0x80, 0x05, 0x52, 0x02, 0xe0, 0xc6, - 0x28, 0xfc, 0x42, 0x35, 0xa7, 0x26, 0xc3, 0x1f, 0x83, 0xa2, 0xe8, 0x9f, 0x8f, 0xa3, 0x9f, 0x97, - 0x98, 0x89, 0xc3, 0x2a, 0x76, 0x4c, 0x86, 0xbe, 0x0e, 0x16, 0x3c, 0x4c, 0x11, 0xbb, 0xcb, 0xea, - 0x05, 0x5e, 0x37, 0xce, 0x26, 0x43, 0x35, 0xe2, 0xdb, 0xba, 0x2b, 0x0d, 0x6f, 0xf6, 0x68, 0xb8, - 0x2f, 0xaf, 0x2d, 0x31, 0x7a, 0xfd, 0x1a, 0x58, 0x4a, 0x18, 0xc0, 0x93, 0x20, 0xd3, 0xc1, 0xd1, - 0x2f, 0x1c, 0xac, 0xc9, 0x1e, 0x77, 0x7b, 0xa8, 0xdb, 0xc7, 0x9c, 0xf6, 0xa2, 0x23, 0x3a, 0x57, - 0xd3, 0x97, 0x35, 0xbb, 0x08, 0x0a, 0xa1, 0xf8, 0x8a, 0xdd, 0x3c, 0x78, 0x6a, 0xa4, 0x9e, 0x3c, - 0x35, 0x52, 0xcf, 0x9f, 0x1a, 0xda, 0xd7, 0x43, 0x43, 0xfb, 0x7e, 0x68, 0x68, 0x8f, 0x87, 0x86, - 0x76, 0x30, 0x34, 0xb4, 0xdf, 0x87, 0x86, 0xf6, 0xe7, 0xd0, 0x48, 0x3d, 0x1f, 0x1a, 0xda, 0xc3, - 0x67, 0x46, 0xea, 0xe0, 0x99, 0x91, 0x7a, 0xf2, 0xcc, 0x48, 0x7d, 0x69, 0xcd, 0x57, 0xc2, 0xb6, - 0xf3, 0x9c, 0x96, 0x8d, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x66, 0x27, 0xc9, 0x7f, 0x7f, 
0x14, - 0x00, 0x00, + // 1498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, + 0x1a, 0x15, 0xf5, 0xb4, 0xc6, 0x8f, 0x9b, 0x3b, 0x36, 0x1c, 0x5e, 0x27, 0x97, 0x14, 0x04, 0xdc, + 0x44, 0xb7, 0x68, 0xa9, 0xc6, 0x4e, 0xf3, 0x6c, 0x8b, 0x86, 0x4d, 0x02, 0xa7, 0x4d, 0x8a, 0x84, + 0x36, 0xba, 0xe8, 0x6e, 0x2c, 0x8d, 0x25, 0x56, 0x7c, 0x99, 0x33, 0x32, 0xe2, 0x5d, 0x7f, 0x40, + 0x0b, 0xe4, 0x07, 0x74, 0x5d, 0x14, 0x68, 0x50, 0xa0, 0x8b, 0x6e, 0xba, 0xec, 0x2a, 0xcb, 0x2c, + 0x03, 0x01, 0x65, 0x1b, 0xa5, 0x8b, 0xc2, 0xab, 0xfc, 0x84, 0x62, 0x1e, 0xa4, 0x48, 0x49, 0x49, + 0xe4, 0xb4, 0x8b, 0x04, 0xe8, 0x46, 0x9a, 0x19, 0x7e, 0x87, 0x1c, 0x9e, 0x73, 0xbe, 0x6f, 0x66, + 0x08, 0x4e, 0x07, 0xbd, 0x4e, 0x73, 0xaf, 0x8f, 0x43, 0x1b, 0x87, 0xfc, 0xff, 0x20, 0x44, 0x5e, + 0x07, 0xa7, 0x9a, 0x46, 0x10, 0xfa, 0xd4, 0x87, 0x60, 0x34, 0xb2, 0xb6, 0xde, 0xb1, 0x69, 0xb7, + 0xbf, 0x63, 0xb4, 0x7c, 0xb7, 0xd9, 0xf1, 0x3b, 0x7e, 0xb3, 0xe3, 0xfb, 0x1d, 0x07, 0xa3, 0xc0, + 0x26, 0xb2, 0xd9, 0x0c, 0x83, 0x56, 0x93, 0x50, 0x44, 0xfb, 0x44, 0xe0, 0xd7, 0x56, 0x58, 0x20, + 0x6f, 0x72, 0x88, 0x1c, 0xd5, 0x65, 0x38, 0xef, 0xed, 0xf4, 0x77, 0x9b, 0xd4, 0x76, 0x31, 0xa1, + 0xc8, 0x0d, 0x64, 0xc0, 0x09, 0x36, 0x3f, 0xc7, 0xef, 0x08, 0x64, 0xdc, 0x90, 0x17, 0xff, 0x93, + 0xb9, 0x48, 0x7a, 0x98, 0xb6, 0xba, 0xf2, 0x52, 0x4d, 0x5e, 0xda, 0x73, 0x5c, 0xbf, 0x8d, 0x1d, + 0x3e, 0x17, 0x22, 0x7e, 0x65, 0xc4, 0x32, 0x8b, 0x08, 0xfa, 0xa4, 0xcb, 0x7f, 0xe4, 0xe0, 0x87, + 0x2f, 0xa4, 0x63, 0x07, 0x11, 0xdc, 0x6c, 0xe3, 0x5d, 0xdb, 0xb3, 0xa9, 0xed, 0x7b, 0x24, 0xdd, + 0x96, 0x37, 0x39, 0x37, 0xdb, 0x4d, 0xc6, 0x29, 0xae, 0x7f, 0x5d, 0x00, 0xf3, 0x37, 0xfd, 0x9e, + 0x6d, 0xe1, 0xbd, 0x3e, 0x26, 0x14, 0xae, 0x80, 0x12, 0x8f, 0x51, 0x95, 0x9a, 0xd2, 0xa8, 0x5a, + 0xa2, 0xc3, 0x46, 0x1d, 0xdb, 0xb5, 0xa9, 0x9a, 0xaf, 0x29, 0x8d, 0x45, 0x4b, 0x74, 0x20, 0x04, + 0x45, 0x42, 0x71, 0xa0, 0x16, 0x6a, 0x4a, 0xa3, 0x60, 0xf1, 0x36, 
0x5c, 0x03, 0x73, 0xb6, 0x47, + 0x71, 0xb8, 0x8f, 0x1c, 0xb5, 0xca, 0xc7, 0x93, 0x3e, 0x7c, 0x1f, 0x54, 0x08, 0x45, 0x21, 0xdd, + 0x26, 0x6a, 0xb1, 0xa6, 0x34, 0xe6, 0xd7, 0xd7, 0x0c, 0x21, 0x85, 0x11, 0x4b, 0x61, 0x6c, 0xc7, + 0x52, 0x98, 0x73, 0x0f, 0x22, 0x3d, 0x77, 0xef, 0x57, 0x5d, 0xb1, 0x62, 0x10, 0xbc, 0x04, 0x4a, + 0xd8, 0x6b, 0x6f, 0x13, 0xb5, 0x74, 0x04, 0xb4, 0x80, 0xc0, 0x33, 0xa0, 0xda, 0xb6, 0x43, 0xdc, + 0x62, 0x9c, 0xa9, 0xe5, 0x9a, 0xd2, 0x58, 0x5a, 0x5f, 0x36, 0x12, 0x69, 0xaf, 0xc6, 0x97, 0xac, + 0x51, 0x14, 0x7b, 0xbd, 0x00, 0xd1, 0xae, 0x5a, 0xe1, 0x4c, 0xf0, 0x36, 0xac, 0x83, 0x32, 0xe9, + 0xa2, 0xb0, 0x4d, 0xd4, 0xb9, 0x5a, 0xa1, 0x51, 0x35, 0xc1, 0x61, 0xa4, 0xcb, 0x11, 0x4b, 0xfe, + 0xc3, 0x8f, 0x40, 0x31, 0x70, 0x90, 0xa7, 0x82, 0x9a, 0xd2, 0x58, 0x30, 0xcf, 0x0d, 0x22, 0x3d, + 0xe3, 0xdd, 0x10, 0xed, 0x22, 0x0f, 0x35, 0x1d, 0xbf, 0x67, 0x37, 0xd3, 0xa2, 0x31, 0x8c, 0x71, + 0x87, 0xd1, 0x7d, 0xdb, 0x41, 0x9e, 0xc5, 0xef, 0x51, 0xff, 0x31, 0x0f, 0x20, 0x93, 0xe7, 0x86, + 0x47, 0x28, 0xf2, 0xe8, 0xcb, 0xa8, 0xf4, 0x2e, 0x28, 0x33, 0x83, 0x6f, 0x13, 0xae, 0xd3, 0xac, + 0xb4, 0x49, 0x4c, 0x96, 0xb7, 0xe2, 0x91, 0x78, 0x2b, 0x4d, 0xe5, 0xad, 0xfc, 0x42, 0xde, 0x2a, + 0x7f, 0x03, 0x6f, 0xdf, 0x15, 0xc1, 0x82, 0xb0, 0x35, 0x09, 0x7c, 0x8f, 0x60, 0x36, 0x81, 0x2d, + 0x5e, 0x1a, 0x04, 0x65, 0x72, 0x02, 0x7c, 0xc4, 0x92, 0x57, 0xe0, 0x07, 0xa0, 0x78, 0x15, 0x51, + 0xc4, 0xe9, 0x9b, 0x5f, 0x5f, 0x31, 0x52, 0xc9, 0xc2, 0xee, 0xc5, 0xae, 0x99, 0xab, 0x8c, 0xa1, + 0xc3, 0x48, 0x5f, 0x6a, 0x23, 0x8a, 0xde, 0xf4, 0x5d, 0x9b, 0x62, 0x37, 0xa0, 0x07, 0x16, 0x47, + 0xc2, 0x77, 0x40, 0xf5, 0x5a, 0x18, 0xfa, 0xe1, 0xf6, 0x41, 0x80, 0x39, 0xdd, 0x55, 0xf3, 0xf8, + 0x61, 0xa4, 0x2f, 0xe3, 0x78, 0x30, 0x85, 0x18, 0x45, 0xc2, 0xff, 0x83, 0x12, 0xef, 0x70, 0x82, + 0xab, 0xe6, 0xf2, 0x61, 0xa4, 0xff, 0x8b, 0x43, 0x52, 0xe1, 0x22, 0x22, 0xab, 0x47, 0x69, 0x26, + 0x3d, 0x12, 0x5b, 0x94, 0xd3, 0xb6, 0x50, 0x41, 0x65, 0x1f, 0x87, 0x84, 0xdd, 0xa6, 0xc2, 0xc7, + 0xe3, 
0x2e, 0xbc, 0x02, 0x00, 0x23, 0xc6, 0x26, 0xd4, 0x6e, 0x31, 0x9f, 0x33, 0x32, 0x16, 0x0d, + 0x51, 0xc6, 0x2c, 0x4c, 0xfa, 0x0e, 0x35, 0xa1, 0x64, 0x21, 0x15, 0x68, 0xa5, 0xda, 0xf0, 0xbe, + 0x02, 0x2a, 0x9b, 0x18, 0xb5, 0x71, 0x48, 0xd4, 0x6a, 0xad, 0xd0, 0x98, 0x5f, 0xff, 0x9f, 0x91, + 0xae, 0x59, 0xb7, 0x43, 0xdf, 0xc5, 0xb4, 0x8b, 0xfb, 0x24, 0x16, 0x48, 0x44, 0x9b, 0xbd, 0x41, + 0xa4, 0xef, 0xcc, 0xa2, 0xfa, 0x4c, 0x75, 0xf2, 0x99, 0xcf, 0x39, 0x8c, 0x74, 0xe5, 0x2d, 0x2b, + 0x9e, 0x62, 0xfd, 0x17, 0x05, 0xfc, 0x9b, 0x29, 0xbc, 0xc5, 0xee, 0x4d, 0x52, 0x49, 0xe6, 0x22, + 0xda, 0xea, 0xaa, 0x0a, 0xb3, 0xac, 0x25, 0x3a, 0xe9, 0x22, 0x96, 0xff, 0x4b, 0x45, 0xac, 0x70, + 0xf4, 0x22, 0x16, 0x67, 0x56, 0x71, 0x6a, 0x66, 0x95, 0x9e, 0x95, 0x59, 0xf5, 0x2f, 0x0b, 0xa2, + 0x8a, 0xc4, 0xef, 0x77, 0x84, 0x9c, 0xb8, 0x9e, 0xe4, 0x44, 0x81, 0xcf, 0x36, 0xb1, 0x9a, 0xb8, + 0xd7, 0x8d, 0x36, 0xf6, 0xa8, 0xbd, 0x6b, 0xe3, 0xf0, 0x05, 0x99, 0x91, 0xb2, 0x5b, 0x21, 0x6b, + 0xb7, 0xb4, 0x57, 0x8a, 0xaf, 0xbc, 0x57, 0xc6, 0xb2, 0xa3, 0xf4, 0x12, 0xd9, 0x51, 0x7f, 0x9a, + 0x07, 0xab, 0x4c, 0x8e, 0x9b, 0x68, 0x07, 0x3b, 0x9f, 0x20, 0xf7, 0x88, 0x92, 0x9c, 0x4a, 0x49, + 0x52, 0x35, 0xe1, 0x3f, 0x94, 0xcf, 0x40, 0xf9, 0x37, 0x0a, 0x98, 0x8b, 0x6b, 0x38, 0x34, 0x00, + 0x10, 0x30, 0x5e, 0xa6, 0x05, 0xd1, 0x4b, 0x0c, 0x1c, 0x26, 0xa3, 0x56, 0x2a, 0x02, 0x7e, 0x0e, + 0xca, 0xa2, 0x27, 0xb3, 0xe0, 0x78, 0x2a, 0x0b, 0x68, 0x88, 0x91, 0x7b, 0xa5, 0x8d, 0x02, 0x8a, + 0x43, 0xf3, 0x22, 0x9b, 0xc5, 0x20, 0xd2, 0x4f, 0x3f, 0x8f, 0x22, 0xbe, 0xf3, 0x13, 0x38, 0x26, + 0xae, 0x78, 0xa6, 0x25, 0x9f, 0x50, 0xff, 0x4a, 0x01, 0xc7, 0xd8, 0x44, 0x19, 0x35, 0x89, 0x2b, + 0xae, 0x82, 0xb9, 0x50, 0xb6, 0xf9, 0x74, 0xe7, 0xd7, 0xeb, 0x46, 0x96, 0xd6, 0x29, 0x54, 0x9a, + 0xc5, 0x07, 0x91, 0xae, 0x58, 0x09, 0x12, 0x6e, 0x64, 0x68, 0xcc, 0x4f, 0xa3, 0x91, 0x41, 0x72, + 0x19, 0xe2, 0x7e, 0xca, 0x03, 0x78, 0xc3, 0x6b, 0xe3, 0xbb, 0xcc, 0x7c, 0x23, 0x9f, 0xf6, 0x27, + 0x66, 0x74, 0x72, 0x44, 0xca, 0x64, 0xbc, 
0x79, 0x79, 0x10, 0xe9, 0xe7, 0x9f, 0xc7, 0xca, 0x73, + 0xc0, 0xa9, 0x57, 0x48, 0x1b, 0x37, 0xff, 0xea, 0xaf, 0x2b, 0xdf, 0xe7, 0xc1, 0xd2, 0xa7, 0xbe, + 0xd3, 0x77, 0x71, 0x42, 0x9c, 0x3b, 0x41, 0x9c, 0x3a, 0x22, 0x2e, 0x1b, 0x6b, 0x9e, 0x1f, 0x44, + 0xfa, 0xc6, 0x4c, 0xa4, 0x65, 0x81, 0xaf, 0x2f, 0x61, 0xf7, 0xf3, 0x60, 0x65, 0xdb, 0x0f, 0x3e, + 0xde, 0xe2, 0xc7, 0xaa, 0x54, 0x5d, 0xc4, 0x13, 0xb4, 0xad, 0x8c, 0x68, 0x63, 0x88, 0x5b, 0x88, + 0x86, 0xf6, 0x5d, 0x73, 0x63, 0x10, 0xe9, 0xcd, 0x99, 0x28, 0x1b, 0x81, 0x5e, 0x5f, 0xba, 0x7e, + 0xce, 0x83, 0xd5, 0x3b, 0x7d, 0xe4, 0x51, 0xdb, 0xc1, 0x82, 0xb2, 0x84, 0xb0, 0x83, 0x09, 0xc2, + 0xb4, 0x11, 0x61, 0x59, 0x8c, 0xa4, 0xee, 0xbd, 0x41, 0xa4, 0x5f, 0x9c, 0x89, 0xba, 0x69, 0xf0, + 0xd7, 0x97, 0xc4, 0x1f, 0x8a, 0x60, 0x91, 0x1f, 0x1f, 0x12, 0xee, 0xde, 0x00, 0x72, 0xc9, 0x95, + 0xcc, 0xc1, 0x78, 0x8f, 0x16, 0x06, 0x2d, 0x63, 0x4b, 0x2e, 0xc6, 0x22, 0x02, 0x5e, 0x00, 0x65, + 0xc2, 0x77, 0x42, 0xb2, 0xa0, 0x6a, 0xe3, 0xa7, 0x86, 0xec, 0x9e, 0x6b, 0x33, 0x67, 0xc9, 0x78, + 0x76, 0x2e, 0x73, 0xd8, 0x06, 0x20, 0xde, 0x09, 0xd6, 0xc7, 0x91, 0x93, 0xdb, 0x03, 0x86, 0x16, + 0x18, 0x78, 0x0e, 0x94, 0x78, 0xe5, 0x96, 0x27, 0xe9, 0xcc, 0x63, 0x27, 0x4b, 0xe8, 0x66, 0xce, + 0x12, 0xe1, 0x70, 0x1d, 0x14, 0x83, 0xd0, 0x77, 0xe5, 0x2a, 0x7a, 0x72, 0xfc, 0x99, 0xe9, 0x65, + 0x67, 0x33, 0x67, 0xf1, 0x58, 0x78, 0x96, 0x6d, 0x79, 0xd9, 0x7a, 0x45, 0xf8, 0x11, 0x82, 0x95, + 0xac, 0x31, 0x58, 0x0a, 0x12, 0x87, 0xc2, 0xb3, 0xa0, 0xbc, 0xcf, 0xcb, 0x12, 0x3f, 0x5f, 0xb0, + 0xbd, 0x63, 0x0a, 0x94, 0x2d, 0x58, 0xec, 0xbd, 0x44, 0x2c, 0xbc, 0x0e, 0x16, 0xa8, 0x1f, 0xf4, + 0xe2, 0x02, 0x20, 0x8f, 0x1f, 0xb5, 0x34, 0x76, 0x5a, 0x81, 0xd8, 0xcc, 0x59, 0x19, 0x1c, 0xbc, + 0x0d, 0x8e, 0xed, 0x65, 0x6c, 0x8a, 0x09, 0xff, 0x1e, 0x31, 0xc6, 0xf3, 0xf4, 0xec, 0xd9, 0xcc, + 0x59, 0x13, 0x68, 0x13, 0x8c, 0x32, 0xaa, 0xfe, 0x7b, 0x01, 0x2c, 0x48, 0xcf, 0x88, 0xb3, 0xc2, + 0xf9, 0xc4, 0x06, 0xc2, 0x32, 0xff, 0x7d, 0x96, 0x0d, 0x78, 0x78, 0xca, 0x05, 
0x6f, 0x27, 0x2e, + 0x10, 0xfe, 0x59, 0x1d, 0x65, 0x29, 0xd7, 0x3f, 0x85, 0x90, 0xca, 0x6f, 0xc4, 0xca, 0x0b, 0xdb, + 0x9c, 0x98, 0xbe, 0xee, 0xc6, 0x28, 0x29, 0xfb, 0x25, 0x50, 0xb1, 0xc5, 0x27, 0x84, 0x69, 0x86, + 0x99, 0xfc, 0xc2, 0xc0, 0x84, 0x94, 0x00, 0xb8, 0x31, 0x92, 0x5f, 0xb8, 0xe6, 0xf8, 0xa4, 0xfc, + 0x09, 0x28, 0x56, 0xff, 0x4c, 0xa2, 0x7e, 0x59, 0x62, 0x26, 0x16, 0xab, 0xe4, 0xc5, 0xa4, 0xf4, + 0x9b, 0x60, 0xce, 0xc5, 0x14, 0xb1, 0xbd, 0xac, 0x5a, 0xe1, 0x75, 0xe3, 0x54, 0x56, 0xaa, 0x11, + 0xdf, 0xc6, 0x2d, 0x19, 0x78, 0xcd, 0xa3, 0xe1, 0x81, 0xdc, 0xb6, 0x24, 0xe8, 0xb5, 0xcb, 0x60, + 0x31, 0x13, 0x00, 0x8f, 0x81, 0x42, 0x0f, 0xc7, 0x5f, 0x4b, 0x58, 0x93, 0x1d, 0xee, 0xf6, 0x91, + 0xd3, 0xc7, 0x9c, 0xf6, 0xaa, 0x25, 0x3a, 0x97, 0xf2, 0x17, 0x14, 0xb3, 0x0a, 0x2a, 0xa1, 0x78, + 0x8a, 0xd9, 0x7e, 0xf8, 0x58, 0xcb, 0x3d, 0x7a, 0xac, 0xe5, 0x9e, 0x3e, 0xd6, 0x94, 0x2f, 0x86, + 0x9a, 0xf2, 0xed, 0x50, 0x53, 0x1e, 0x0c, 0x35, 0xe5, 0xe1, 0x50, 0x53, 0x7e, 0x1b, 0x6a, 0xca, + 0x1f, 0x43, 0x2d, 0xf7, 0x74, 0xa8, 0x29, 0xf7, 0x9e, 0x68, 0xb9, 0x87, 0x4f, 0xb4, 0xdc, 0xa3, + 0x27, 0x5a, 0xee, 0x33, 0xe3, 0x68, 0x25, 0x6c, 0xa7, 0xcc, 0x69, 0xd9, 0xf8, 0x33, 0x00, 0x00, + 0xff, 0xff, 0x33, 0xb4, 0xee, 0x07, 0x17, 0x15, 0x00, 0x00, } func (this *LokiRequest) Equal(that interface{}) bool { @@ -1269,6 +1274,13 @@ func (this *LokiRequest) Equal(that interface{}) bool { return false } } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } return true } func (this *LokiInstantRequest) Equal(that interface{}) bool { @@ -1313,6 +1325,13 @@ func (this *LokiInstantRequest) Equal(that interface{}) bool { return false } } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } return true } func (this *LokiResponse) Equal(that interface{}) bool { @@ -2120,7 +2139,7 @@ func (this *LokiRequest) GoString() string { if this 
== nil { return "nil" } - s := make([]string, 0, 13) + s := make([]string, 0, 14) s = append(s, "&queryrange.LokiRequest{") s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") @@ -2131,6 +2150,7 @@ func (this *LokiRequest) GoString() string { s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n") s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n") + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2138,7 +2158,7 @@ func (this *LokiInstantRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) + s := make([]string, 0, 11) s = append(s, "&queryrange.LokiInstantRequest{") s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") @@ -2146,6 +2166,7 @@ func (this *LokiInstantRequest) GoString() string { s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n") s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n") + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2463,6 +2484,18 @@ func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.Interval != 0 { i = encodeVarintQueryrange(dAtA, i, uint64(m.Interval)) i-- @@ -2545,6 +2578,18 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 
0, err + } + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if len(m.Shards) > 0 { for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Shards[iNdEx]) @@ -3594,6 +3639,10 @@ func (m *LokiRequest) Size() (n int) { if m.Interval != 0 { n += 1 + sovQueryrange(uint64(m.Interval)) } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovQueryrange(uint64(l)) + } return n } @@ -3625,6 +3674,10 @@ func (m *LokiInstantRequest) Size() (n int) { n += 1 + l + sovQueryrange(uint64(l)) } } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovQueryrange(uint64(l)) + } return n } @@ -4092,6 +4145,7 @@ func (this *LokiRequest) String() string { `Path:` + fmt.Sprintf("%v", this.Path) + `,`, `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, `Interval:` + fmt.Sprintf("%v", this.Interval) + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `}`, }, "") return s @@ -4107,6 +4161,7 @@ func (this *LokiInstantRequest) String() string { `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `}`, }, "") return s @@ -4689,6 +4744,41 @@ func (m *LokiRequest) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v github_com_grafana_loki_pkg_querier_plan.QueryPlan + m.Plan = &v + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) @@ -4909,6 +4999,41 @@ func (m *LokiInstantRequest) Unmarshal(dAtA []byte) error { } m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v github_com_grafana_loki_pkg_querier_plan.QueryPlan + m.Plan = &v + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index d5e89eeee47a7..8eb43e34ca160 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -33,6 +33,7 @@ message LokiRequest { logproto.Direction direction = 6; string path = 7; repeated string shards = 8 [(gogoproto.jsontag) = "shards"]; + bytes plan = 10 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; } message LokiInstantRequest { @@ -45,6 +46,7 @@ message LokiInstantRequest { logproto.Direction direction = 4; string path = 5; repeated string shards = 6 [(gogoproto.jsontag) = "shards"]; + bytes plan = 7 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; } message LokiResponse { diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index 
26ec924ce5c4f..1df7bb4616fb9 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -185,7 +185,12 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que mapper := logql.NewShardMapper(resolver, ast.metrics) - noop, bytesPerShard, parsed, err := mapper.Parse(r.GetQuery()) + params, err := ParamsFromRequest(r) + if err != nil { + return nil, err + } + + noop, bytesPerShard, parsed, err := mapper.Parse(params.GetExpression()) if err != nil { level.Warn(logger).Log("msg", "failed mapping AST", "err", err.Error(), "query", r.GetQuery()) return nil, err @@ -203,11 +208,6 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que return ast.next.Do(ctx, r) } - params, err := ParamsFromRequest(r) - if err != nil { - return nil, err - } - var path string switch r := r.(type) { case *LokiRequest: @@ -217,7 +217,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que default: return nil, fmt.Errorf("expected *LokiRequest or *LokiInstantRequest, got (%T)", r) } - query := ast.ng.Query(ctx, params, parsed) + query := ast.ng.Query(ctx, logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: parsed}) res, err := query.Exec(ctx) if err != nil { diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index 8c77afe0410cf..d3d3ce807ac64 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -19,7 +19,9 @@ import ( "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" 
"github.com/grafana/loki/pkg/storage/config" @@ -172,7 +174,12 @@ func Test_astMapper(t *testing.T) { 0, ) - resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`{food="bar"}`)) + req := defaultReq() + req.Query = `{foo="bar"}` + req.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(req.Query), + } + resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), req) require.Nil(t, err) require.Equal(t, []*definitions.PrometheusResponseHeader{ @@ -311,7 +318,12 @@ func Test_astMapper_QuerySizeLimits(t *testing.T) { 0, ) - _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(tc.query)) + req := defaultReq() + req.Query = tc.query + req.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.query), + } + _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), req) if err != nil { require.ErrorContains(t, err, tc.err) } @@ -344,7 +356,13 @@ func Test_ShardingByPass(t *testing.T) { 0, ) - _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`1+1`)) + req := defaultReq() + req.Query = `1+1` + req.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(req.Query), + } + + _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), req) require.Nil(t, err) require.Equal(t, called, 1) } @@ -437,6 +455,9 @@ func Test_InstantSharding(t *testing.T) { Query: `rate({app="foo"}[1m])`, TimeTs: util.TimeFromMillis(10), Path: "/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`rate({app="foo"}[1m])`), + }, }) require.NoError(t, err) require.Equal(t, 3, called, "expected 3 calls but got {}", called) @@ -703,6 +724,13 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { 0, ) + // currently all the tests call `defaultReq()` which creates an instance of the type LokiRequest + // if in the future that isn't true, we need another way to access the Plan field of an arbitrary query type + // or we should set the Plan in calls to `GetExpression` if 
the Plan is nil by calling `ParseExpr` or similar + tc.req.(*LokiRequest).Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.req.GetQuery()), + } + resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), tc.req) require.Nil(t, err) @@ -830,12 +858,16 @@ func Test_ASTMapper_MaxLookBackPeriod(t *testing.T) { 0, ) + q := `{cluster="dev-us-central-0"}` lokiReq := &LokiInstantRequest{ - Query: `{cluster="dev-us-central-0"}`, + Query: q, Limit: 1000, TimeTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(q), + }, } ctx := user.InjectOrgID(context.Background(), "foo") diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 4c19a1ffc1202..b91e088041db9 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -23,8 +23,10 @@ import ( "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/config" @@ -332,12 +334,16 @@ func TestInstantQueryTripperware(t *testing.T) { } require.NoError(t, err) + q := `sum by (job) (bytes_rate({cluster="dev-us-central-0"}[15m]))` lreq := &LokiInstantRequest{ - Query: `sum by (job) (bytes_rate({cluster="dev-us-central-0"}[15m]))`, + Query: q, Limit: 1000, TimeTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(q), + }, } ctx := user.InjectOrgID(context.Background(), "1") @@ -1101,6 +1107,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { TimeTs: testTime, Direction: logproto.FORWARD, Path: 
"/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[2h]))`), + }, }, expectedSplitStats: 2, // [2h] interval split by 1h configured split interval expectedShardStats: 8, // 2 time splits * 4 row shards @@ -1113,6 +1122,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { TimeTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[1h]))`), + }, }, expectedSplitStats: 0, // [1h] interval not split expectedShardStats: 4, // 4 row shards @@ -1127,6 +1139,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[1h]))`), + }, }, expectedSplitStats: 3, // 2 hour range interval split based on the base hour + the remainder expectedShardStats: 12, // 3 time splits * 4 row shards @@ -1141,6 +1156,9 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (app) (rate({app="foo"} |= "foo"[1h]))`), + }, }, expectedSplitStats: 0, // 1 minute range interval not split expectedShardStats: 4, // 4 row shards diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index da8326a678ec5..b4245375bce66 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -383,6 +383,7 @@ func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]quer Path: lokiReq.Path, StartTs: start, EndTs: end, + Plan: lokiReq.Plan, }) }) @@ -403,6 +404,7 @@ func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]quer Path: lokiReq.Path, StartTs: start, EndTs: end, + Plan: lokiReq.Plan, }) } diff 
--git a/pkg/querier/queryrange/split_by_range.go b/pkg/querier/queryrange/split_by_range.go index e3640761d57ec..6845846d4deaa 100644 --- a/pkg/querier/queryrange/split_by_range.go +++ b/pkg/querier/queryrange/split_by_range.go @@ -47,6 +47,11 @@ func NewSplitByRangeMiddleware(logger log.Logger, engineOpts logql.EngineOpts, l func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (queryrangebase.Response, error) { logger := util_log.WithContext(ctx, s.logger) + params, err := ParamsFromRequest(request) + if err != nil { + return nil, err + } + tenants, err := tenant.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) @@ -64,7 +69,7 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) ( return nil, err } - noop, parsed, err := mapper.Parse(request.GetQuery()) + noop, parsed, err := mapper.Parse(params.GetExpression()) if err != nil { level.Warn(logger).Log("msg", "failed mapping AST", "err", err.Error(), "query", request.GetQuery()) return nil, err @@ -80,16 +85,11 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) ( queryStatsCtx := stats.FromContext(ctx) queryStatsCtx.AddSplitQueries(int64(mapperStats.GetSplitQueries())) - params, err := ParamsFromRequest(request) - if err != nil { - return nil, err - } - if _, ok := request.(*LokiInstantRequest); !ok { - return nil, fmt.Errorf("expected *LokiInstantRequest") + return nil, fmt.Errorf("expected *LokiInstantRequest, got %T", request) } - query := s.ng.Query(ctx, params, parsed) + query := s.ng.Query(ctx, logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: parsed}) res, err := query.Exec(ctx) if err != nil { diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go index c3b4587a1dbb1..ef25e3f910fb3 100644 --- a/pkg/querier/queryrange/split_by_range_test.go +++ b/pkg/querier/queryrange/split_by_range_test.go @@ -6,13 +6,14 @@ 
import ( "testing" "time" - "github.com/grafana/loki/pkg/loghttp" - "github.com/go-kit/log" "github.com/grafana/dskit/user" "github.com/stretchr/testify/require" + "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" ) @@ -37,6 +38,9 @@ func Test_RangeVectorSplit(t *testing.T) { Query: `sum(bytes_over_time({app="foo"}[3m]))`, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum(bytes_over_time({app="foo"}[3m]))`), + }, }, subQueries: []queryrangebase.RequestResponse{ subQueryRequestResponse(`sum(bytes_over_time({app="foo"}[1m]))`, 1), @@ -50,6 +54,9 @@ func Test_RangeVectorSplit(t *testing.T) { Query: `sum by (bar) (bytes_over_time({app="foo"}[3m]))`, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (bar) (bytes_over_time({app="foo"}[3m]))`), + }, }, subQueries: []queryrangebase.RequestResponse{ subQueryRequestResponse(`sum by (bar)(bytes_over_time({app="foo"}[1m]))`, 10), @@ -63,6 +70,9 @@ func Test_RangeVectorSplit(t *testing.T) { Query: `sum(count_over_time({app="foo"}[3m]))`, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum(count_over_time({app="foo"}[3m]))`), + }, }, subQueries: []queryrangebase.RequestResponse{ subQueryRequestResponse(`sum(count_over_time({app="foo"}[1m]))`, 1), @@ -76,6 +86,9 @@ func Test_RangeVectorSplit(t *testing.T) { Query: `sum by (bar) (count_over_time({app="foo"}[3m]))`, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (bar) (count_over_time({app="foo"}[3m]))`), + }, }, subQueries: []queryrangebase.RequestResponse{ subQueryRequestResponse(`sum by (bar)(count_over_time({app="foo"}[1m]))`, 0), @@ -89,6 +102,9 @@ func 
Test_RangeVectorSplit(t *testing.T) { Query: `sum(sum_over_time({app="foo"} | unwrap bar [3m]))`, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum(sum_over_time({app="foo"} | unwrap bar [3m]))`), + }, }, subQueries: []queryrangebase.RequestResponse{ subQueryRequestResponse(`sum(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1), @@ -102,6 +118,9 @@ func Test_RangeVectorSplit(t *testing.T) { Query: `sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3m]))`, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3m]))`), + }, }, subQueries: []queryrangebase.RequestResponse{ subQueryRequestResponse(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1), @@ -140,6 +159,9 @@ func subQueryRequestResponse(expectedSubQuery string, sampleValue float64) query Query: expectedSubQuery, TimeTs: time.Unix(1, 0), Path: "/loki/api/v1/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(expectedSubQuery), + }, }, Response: &LokiPromResponse{ Response: &queryrangebase.PrometheusResponse{ diff --git a/pkg/querier/queryrange/stats.go b/pkg/querier/queryrange/stats.go index 0233d886c98f2..71f93959c3b69 100644 --- a/pkg/querier/queryrange/stats.go +++ b/pkg/querier/queryrange/stats.go @@ -53,13 +53,13 @@ func recordQueryMetrics(data *queryData) { case queryTypeLog, queryTypeMetric: logql.RecordRangeAndInstantQueryMetrics(data.ctx, logger, data.params, data.status, *data.statistics, data.result) case queryTypeLabel: - logql.RecordLabelQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.label, data.params.Query(), data.status, *data.statistics) + logql.RecordLabelQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.label, data.params.QueryString(), data.status, *data.statistics) case queryTypeSeries: logql.RecordSeriesQueryMetrics(data.ctx, logger, 
data.params.Start(), data.params.End(), data.match, data.status, []string{}, *data.statistics) case queryTypeStats: - logql.RecordStatsQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.Query(), data.status, *data.statistics) + logql.RecordStatsQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.QueryString(), data.status, *data.statistics) case queryTypeVolume: - logql.RecordVolumeQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.Query(), data.params.Limit(), data.params.Step(), data.status, *data.statistics) + logql.RecordVolumeQueryMetrics(data.ctx, logger, data.params.Start(), data.params.End(), data.params.QueryString(), data.params.Limit(), data.params.Step(), data.status, *data.statistics) default: level.Error(logger).Log("msg", "failed to record query metrics", "err", fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, got %s", data.queryType)) } diff --git a/pkg/querier/queryrange/stats_test.go b/pkg/querier/queryrange/stats_test.go index 54c9004d88cec..28f8d12de7f6d 100644 --- a/pkg/querier/queryrange/stats_test.go +++ b/pkg/querier/queryrange/stats_test.go @@ -30,7 +30,7 @@ func TestStatsCollectorMiddleware(t *testing.T) { Query: "foo", StartTs: now, }) - require.Equal(t, "foo", data.params.Query()) + require.Equal(t, "foo", data.params.QueryString()) require.Equal(t, true, data.recorded) require.Equal(t, now, data.params.Start()) require.Nil(t, data.statistics) @@ -60,7 +60,7 @@ func TestStatsCollectorMiddleware(t *testing.T) { Query: "foo", StartTs: now, }) - require.Equal(t, "foo", data.params.Query()) + require.Equal(t, "foo", data.params.QueryString()) require.Equal(t, true, data.recorded) require.Equal(t, now, data.params.Start()) require.Equal(t, int32(10), data.statistics.Ingester.TotalReached) @@ -108,7 +108,7 @@ func Test_StatsHTTP(t *testing.T) { }), func(t *testing.T, data *queryData) { 
require.Equal(t, fmt.Sprintf("%d", http.StatusOK), data.status) - require.Equal(t, "foo", data.params.Query()) + require.Equal(t, "foo", data.params.QueryString()) require.Equal(t, logproto.BACKWARD, data.params.Direction()) require.Equal(t, uint32(100), data.params.Limit()) require.Equal(t, stats.Result{}, *data.statistics) @@ -129,7 +129,7 @@ func Test_StatsHTTP(t *testing.T) { }), func(t *testing.T, data *queryData) { require.Equal(t, fmt.Sprintf("%d", http.StatusTeapot), data.status) - require.Equal(t, "foo", data.params.Query()) + require.Equal(t, "foo", data.params.QueryString()) require.Equal(t, logproto.BACKWARD, data.params.Direction()) require.Equal(t, uint32(100), data.params.Limit()) require.Equal(t, statsResult, *data.statistics) @@ -151,7 +151,7 @@ func Test_StatsHTTP(t *testing.T) { }), func(t *testing.T, data *queryData) { require.Equal(t, fmt.Sprintf("%d", http.StatusTeapot), data.status) - require.Equal(t, "foo", data.params.Query()) + require.Equal(t, "foo", data.params.QueryString()) require.Equal(t, logproto.BACKWARD, data.params.Direction()) require.Equal(t, uint32(100), data.params.Limit()) require.Equal(t, statsResult, *data.statistics) @@ -173,7 +173,7 @@ func Test_StatsHTTP(t *testing.T) { }), func(t *testing.T, data *queryData) { require.Equal(t, fmt.Sprintf("%d", http.StatusTeapot), data.status) - require.Equal(t, "foo", data.params.Query()) + require.Equal(t, "foo", data.params.QueryString()) require.Equal(t, uint32(100), data.params.Limit()) require.Equal(t, statsResult, *data.statistics) require.Equal(t, streams, data.result) diff --git a/pkg/querier/worker/util_test.go b/pkg/querier/worker/util_test.go index a0213e3bb708b..25dd8127a0da4 100644 --- a/pkg/querier/worker/util_test.go +++ b/pkg/querier/worker/util_test.go @@ -61,7 +61,7 @@ func TestHandleQueryRequest(t *testing.T) { } { t.Run(name, func(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "1") - request, err := queryrange.DefaultCodec.QueryRequestWrap(ctx, 
&queryrange.LokiRequest{}) + request, err := queryrange.DefaultCodec.QueryRequestWrap(ctx, &queryrange.LokiRequest{Query: `{app="foo"}`}) require.NoError(t, err) mockHandler := HandlerFunc(func(context.Context, queryrangebase.Request) (queryrangebase.Response, error) { diff --git a/pkg/ruler/evaluator_local.go b/pkg/ruler/evaluator_local.go index fed0f2f02ef11..91efd5a14d995 100644 --- a/pkg/ruler/evaluator_local.go +++ b/pkg/ruler/evaluator_local.go @@ -28,7 +28,7 @@ func NewLocalEvaluator(engine *logql.Engine, logger log.Logger) (*LocalEvaluator } func (l *LocalEvaluator) Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error) { - params := logql.NewLiteralParams( + params, err := logql.NewLiteralParams( qs, now, now, @@ -38,6 +38,9 @@ func (l *LocalEvaluator) Eval(ctx context.Context, qs string, now time.Time) (*l 0, nil, ) + if err != nil { + return nil, err + } q := l.engine.Query(params) res, err := q.Exec(ctx) From e28f7f33635ae30749d55dace537861688cf2900 Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Wed, 22 Nov 2023 16:51:27 +0530 Subject: [PATCH 29/48] update dskit to latest version (#11287) **What this PR does / why we need it**: Update dskit to latest version --- .drone/drone.jsonnet | 4 +- .drone/drone.yml | 6 +- docs/sources/configure/_index.md | 17 +- go.mod | 12 +- go.sum | 23 +- .../frontend/transport/handler.go | 5 +- pkg/lokifrontend/frontend/v2/frontend.go | 3 +- .../github.com/cristalhq/hedgedhttp/README.md | 2 +- .../github.com/cristalhq/hedgedhttp/hedged.go | 110 ++- .../github.com/cristalhq/hedgedhttp/stats.go | 14 + vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CHANGELOG.md | 10 + vendor/github.com/google/uuid/CONTRIBUTING.md | 16 + vendor/github.com/google/uuid/README.md | 10 +- vendor/github.com/google/uuid/node_js.go | 2 +- vendor/github.com/google/uuid/uuid.go | 10 +- .../grafana/dskit/concurrency/buffer.go | 7 + .../grafana/dskit/concurrency/worker.go | 38 + 
.../dskit/grpcclient/instrumentation.go | 6 +- .../grafana/dskit/grpcutil/cancel.go | 25 - .../grafana/dskit/grpcutil/status.go | 70 ++ .../grafana/dskit/httpgrpc/httpgrpc.go | 104 ++- .../grafana/dskit/httpgrpc/server/server.go | 93 +-- .../dskit/kv/memberlist/memberlist_client.go | 146 +++- .../dskit/middleware/grpc_instrumentation.go | 146 ++-- .../grafana/dskit/middleware/grpc_logging.go | 6 + .../grafana/dskit/middleware/zero_response.go | 132 ++++ .../grafana/dskit/modules/module_service.go | 10 +- vendor/github.com/grafana/dskit/ring/batch.go | 151 ++-- .../grafana/dskit/ring/replication_set.go | 5 +- vendor/github.com/grafana/dskit/ring/util.go | 9 +- .../github.com/grafana/dskit/server/limits.go | 32 +- .../github.com/grafana/dskit/server/server.go | 47 +- .../x/oauth2/google/appengine_gen1.go | 1 - .../x/oauth2/google/appengine_gen2_flex.go | 1 - .../x/oauth2/internal/client_appengine.go | 1 - .../api/annotations/field_behavior.pb.go | 22 +- vendor/google.golang.org/grpc/README.md | 2 +- .../grpc/attributes/attributes.go | 4 +- .../grpc/authz/audit/audit_logger.go | 12 +- .../grpc/balancer/balancer.go | 15 + .../grpc/balancer/grpclb/grpclb.go | 50 +- .../grpc/balancer/grpclb/grpclb_picker.go | 9 - .../balancer/grpclb/grpclb_remote_balancer.go | 49 +- .../grpc/balancer/grpclb/grpclb_util.go | 61 -- vendor/google.golang.org/grpc/clientconn.go | 18 +- vendor/google.golang.org/grpc/dialoptions.go | 5 +- .../grpc/encoding/encoding.go | 13 +- .../health/grpc_health_v1/health_grpc.pb.go | 22 +- .../grpc/internal/backoff/backoff.go | 36 + .../internal/balancergroup/balancergroup.go | 17 +- .../grpc/internal/cache/timeoutCache.go | 7 + .../grpc/internal/internal.go | 6 + .../internal/proto/grpc_lookup_v1/rls.pb.go | 130 ++-- .../grpc/internal/status/status.go | 28 + .../grpc/internal/transport/handler_server.go | 13 +- .../grpc/internal/transport/http2_client.go | 13 +- .../grpc/internal/transport/http2_server.go | 14 +- .../grpc/internal/transport/http_util.go | 
18 +- .../grpc/internal/transport/transport.go | 2 +- .../google.golang.org/grpc/orca/producer.go | 54 +- .../grpc/resolver/manual/manual.go | 27 +- vendor/google.golang.org/grpc/server.go | 136 ++-- vendor/google.golang.org/grpc/tap/tap.go | 6 + vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 3 + .../grpc/xds/googledirectpath/googlec2p.go | 19 +- .../balancer/cdsbalancer/cdsbalancer.go | 649 +++++++++++------- .../balancer/cdsbalancer/cluster_handler.go | 368 ---------- .../balancer/cdsbalancer/cluster_watcher.go | 58 ++ .../clusterresolver/resource_resolver.go | 2 +- .../xds/internal/resolver/serviceconfig.go | 22 +- .../grpc/xds/internal/xdsclient/client.go | 1 - .../internal/xdsclient/clientimpl_watchers.go | 31 - .../xdsclient/transport/loadreport.go | 69 +- .../internal/xdsclient/transport/transport.go | 52 +- vendor/modules.txt | 16 +- 77 files changed, 1895 insertions(+), 1469 deletions(-) delete mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md create mode 100644 vendor/github.com/grafana/dskit/concurrency/worker.go delete mode 100644 vendor/github.com/grafana/dskit/grpcutil/cancel.go create mode 100644 vendor/github.com/grafana/dskit/grpcutil/status.go create mode 100644 vendor/github.com/grafana/dskit/middleware/zero_response.go delete mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index 0a4d2c66ef9d0..96acc3cd2b5bf 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -160,14 +160,14 @@ local promtail_win() = pipeline('promtail-windows') { steps: [ { name: 'identify-runner', - image: 'golang:1.19-windowsservercore-1809', + image: 'golang:1.21.3-windowsservercore-1809', commands: [ 'Write-Output $env:DRONE_RUNNER_NAME', ], }, { name: 
'test', - image: 'golang:1.19-windowsservercore-1809', + image: 'golang:1.21.3-windowsservercore-1809', commands: [ 'go test .\\clients\\pkg\\promtail\\targets\\windows\\... -v', ], diff --git a/.drone/drone.yml b/.drone/drone.yml index eb6d9b8720e15..d4d91d2424339 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -1666,11 +1666,11 @@ platform: steps: - commands: - Write-Output $env:DRONE_RUNNER_NAME - image: golang:1.19-windowsservercore-1809 + image: golang:1.21.3-windowsservercore-1809 name: identify-runner - commands: - go test .\clients\pkg\promtail\targets\windows\... -v - image: golang:1.19-windowsservercore-1809 + image: golang:1.21.3-windowsservercore-1809 name: test trigger: ref: @@ -2106,6 +2106,6 @@ kind: secret name: gpg_private_key --- kind: signature -hmac: caf375427f92f78711f801f56341357b67737330e906346ee908a796c61dd314 +hmac: 8ae9cff1a379503d0b568f727d9c12bcb486a5e8d1fc3271deea32f07939baf1 ... diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index 1523f2454e19b..f6fb4aefe9182 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -335,14 +335,24 @@ grpc_tls_config: # CLI flag: -server.register-instrumentation [register_instrumentation: | default = true] +# If set to true, gRPC statuses will be reported in instrumentation labels with +# their string representations. Otherwise, they will be reported as "error". +# CLI flag: -server.report-grpc-codes-in-instrumentation-label-enabled +[report_grpc_codes_in_instrumentation_label_enabled: | default = false] + # Timeout for graceful shutdowns # CLI flag: -server.graceful-shutdown-timeout [graceful_shutdown_timeout: | default = 30s] -# Read timeout for HTTP server +# Read timeout for entire HTTP request, including headers and body. # CLI flag: -server.http-read-timeout [http_server_read_timeout: | default = 30s] +# Read timeout for HTTP request headers. If set to 0, value of +# -server.http-read-timeout is used. 
+# CLI flag: -server.http-read-header-timeout +[http_server_read_header_timeout: | default = 0s] + # Write timeout for HTTP server # CLI flag: -server.http-write-timeout [http_server_write_timeout: | default = 30s] @@ -351,6 +361,11 @@ grpc_tls_config: # CLI flag: -server.http-idle-timeout [http_server_idle_timeout: | default = 2m] +# Log closed connections that did not receive any response, most likely because +# client didn't send any request within timeout. +# CLI flag: -server.http-log-closed-connections-without-response-enabled +[http_log_closed_connections_without_response_enabled: | default = false] + # Limit on the size of a gRPC message this server can receive (bytes). # CLI flag: -server.grpc-max-recv-msg-size-bytes [grpc_server_max_recv_msg_size: | default = 4194304] diff --git a/go.mod b/go.mod index 7a82419ec5d6a..cf7b393e8b44d 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash/v2 v2.2.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cristalhq/hedgedhttp v0.7.2 + github.com/cristalhq/hedgedhttp v0.9.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/docker v24.0.7+incompatible github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 @@ -45,11 +45,11 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.9 github.com/google/renameio/v2 v2.0.0 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 + github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f github.com/grafana/go-gelf/v2 v2.0.1 github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd @@ -104,7 +104,7 @@ require ( golang.org/x/sys v0.13.0 golang.org/x/time 
v0.3.0 google.golang.org/api v0.132.0 - google.golang.org/grpc v1.58.3 + google.golang.org/grpc v1.59.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -135,7 +135,7 @@ require ( go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b - golang.org/x/oauth2 v0.10.0 + golang.org/x/oauth2 v0.11.0 golang.org/x/text v0.13.0 google.golang.org/protobuf v1.31.0 k8s.io/apimachinery v0.28.1 @@ -316,7 +316,7 @@ require ( golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 4bdb7d2237462..42fd5c822b694 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cristalhq/hedgedhttp v0.7.2 h1:RbQacI2n+1fIOslNq/pjgOfBe1RfjAa7hqHpojopCic= -github.com/cristalhq/hedgedhttp v0.7.2/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= +github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= +github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= 
github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -939,8 +939,9 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= @@ -981,8 +982,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 h1:wRtcM7fvzg/MJ4KCIYLryadp2fI3pO61BEiY7SizCoI= -github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47/go.mod h1:byPCvaG/pqi33Kq+Wvkp7WhLfmrlyy0RAoYG4yRh01I= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f 
h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= @@ -2024,8 +2025,8 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2473,8 +2474,8 @@ google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+S google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= 
-google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -2519,8 +2520,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/pkg/lokifrontend/frontend/transport/handler.go 
b/pkg/lokifrontend/frontend/transport/handler.go index 06f1ebe1c7b63..2a4781b3bc718 100644 --- a/pkg/lokifrontend/frontend/transport/handler.go +++ b/pkg/lokifrontend/frontend/transport/handler.go @@ -15,7 +15,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/httpgrpc/server" "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -241,7 +240,7 @@ func writeError(w http.ResponseWriter, err error) { err = errRequestEntityTooLarge } } - server.WriteError(w, err) + httpgrpc.WriteError(w, err) } func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) { @@ -277,7 +276,7 @@ func (a *grpcRoundTripperToHandlerAdapter) Do(ctx context.Context, req queryrang return nil, err } - grpcReq, err := server.HTTPRequest(httpReq) + grpcReq, err := httpgrpc.FromHTTPRequest(httpReq) if err != nil { return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err) } diff --git a/pkg/lokifrontend/frontend/v2/frontend.go b/pkg/lokifrontend/frontend/v2/frontend.go index 4fe591a346a9b..695a054e42580 100644 --- a/pkg/lokifrontend/frontend/v2/frontend.go +++ b/pkg/lokifrontend/frontend/v2/frontend.go @@ -14,7 +14,6 @@ import ( "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/grpcclient" "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/httpgrpc/server" "github.com/grafana/dskit/netutil" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" @@ -317,7 +316,7 @@ func (f *Frontend) Do(ctx context.Context, req queryrangebase.Request) (queryran return nil, fmt.Errorf("cannot convert request to HTTP request: %w", err) } - freq.request, err = server.HTTPRequest(httpReq) + freq.request, err = httpgrpc.FromHTTPRequest(httpReq) if err != nil { return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err) } diff 
--git a/vendor/github.com/cristalhq/hedgedhttp/README.md b/vendor/github.com/cristalhq/hedgedhttp/README.md index aec2a1b3548d5..104213b350b12 100644 --- a/vendor/github.com/cristalhq/hedgedhttp/README.md +++ b/vendor/github.com/cristalhq/hedgedhttp/README.md @@ -10,7 +10,7 @@ Hedged HTTP client which helps to reduce tail latency at scale. ## Rationale -See paper [Tail at Scale](https://cacm.acm.org/magazines/2013/2/160173-the-tail-at-scale/fulltext) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received. +See paper [Tail at Scale](https://www.barroso.org/publications/TheTailAtScale.pdf) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received. ## Acknowledge diff --git a/vendor/github.com/cristalhq/hedgedhttp/hedged.go b/vendor/github.com/cristalhq/hedgedhttp/hedged.go index 56d65b0b1c44e..b7b33f50b89d3 100644 --- a/vendor/github.com/cristalhq/hedgedhttp/hedged.go +++ b/vendor/github.com/cristalhq/hedgedhttp/hedged.go @@ -12,6 +12,79 @@ import ( const infiniteTimeout = 30 * 24 * time.Hour // domain specific infinite +// Client represents a hedged HTTP client. +type Client struct { + rt http.RoundTripper + stats *Stats +} + +// Config for the [Client]. +type Config struct { + // Transport of the [Client]. + // Default is nil which results in [net/http.DefaultTransport]. + Transport http.RoundTripper + + // Upto says how much requests to make. + // Default is zero which means no hedged requests will be made. + Upto int + + // Delay before 2 consequitive hedged requests. 
+ Delay time.Duration + + // Next returns the upto and delay for each HTTP that will be hedged. + // Default is nil which results in (Upto, Delay) result. + Next NextFn +} + +// NextFn represents a function that is called for each HTTP request for retrieving hedging options. +type NextFn func() (upto int, delay time.Duration) + +// New returns a new Client for the given config. +func New(cfg Config) (*Client, error) { + switch { + case cfg.Delay < 0: + return nil, errors.New("hedgedhttp: timeout cannot be negative") + case cfg.Upto < 0: + return nil, errors.New("hedgedhttp: upto cannot be negative") + } + if cfg.Transport == nil { + cfg.Transport = http.DefaultTransport + } + + rt, stats, err := NewRoundTripperAndStats(cfg.Delay, cfg.Upto, cfg.Transport) + if err != nil { + return nil, err + } + + // TODO(cristaloleg): this should be removed after internals cleanup. + rt2, ok := rt.(*hedgedTransport) + if !ok { + panic(fmt.Sprintf("want *hedgedTransport got %T", rt)) + } + rt2.next = cfg.Next + + c := &Client{ + rt: rt2, + stats: stats, + } + return c, nil +} + +// Stats returns statistics for the given client, see [Stats] methods. +func (c *Client) Stats() *Stats { + return c.stats +} + +// Do does the same as [RoundTrip], this method is presented to align with [net/http.Client]. +func (c *Client) Do(req *http.Request) (*http.Response, error) { + return c.rt.RoundTrip(req) +} + +// RoundTrip implements [net/http.RoundTripper] interface. +func (c *Client) RoundTrip(req *http.Request) (*http.Response, error) { + return c.rt.RoundTrip(req) +} + // NewClient returns a new http.Client which implements hedged requests pattern. // Given Client starts a new request after a timeout from previous request. // Starts no more than upto requests. 
@@ -63,8 +136,8 @@ func NewRoundTripperAndStats(timeout time.Duration, upto int, rt http.RoundTripp switch { case timeout < 0: return nil, nil, errors.New("hedgedhttp: timeout cannot be negative") - case upto < 1: - return nil, nil, errors.New("hedgedhttp: upto must be greater than 0") + case upto < 0: + return nil, nil, errors.New("hedgedhttp: upto cannot be negative") } if rt == nil { @@ -88,21 +161,35 @@ type hedgedTransport struct { rt http.RoundTripper timeout time.Duration upto int + next NextFn metrics *Stats } func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) { mainCtx := req.Context() - timeout := ht.timeout + upto, timeout := ht.upto, ht.timeout + if ht.next != nil { + upto, timeout = ht.next() + } + + // no hedged requests, just a regular one. + if upto <= 0 { + return ht.rt.RoundTrip(req) + } + // rollback to default timeout. + if timeout < 0 { + timeout = ht.timeout + } + errOverall := &MultiError{} - resultCh := make(chan indexedResp, ht.upto) - errorCh := make(chan error, ht.upto) + resultCh := make(chan indexedResp, upto) + errorCh := make(chan error, upto) ht.metrics.requestedRoundTripsInc() resultIdx := -1 - cancels := make([]func(), ht.upto) + cancels := make([]func(), upto) defer runInPool(func() { for i, cancel := range cancels { @@ -113,8 +200,8 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) } }) - for sent := 0; len(errOverall.Errors) < ht.upto; sent++ { - if sent < ht.upto { + for sent := 0; len(errOverall.Errors) < upto; sent++ { + if sent < upto { idx := sent subReq, cancel := reqWithCtx(req, mainCtx, idx != 0) cancels[idx] = cancel @@ -132,7 +219,7 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) } // all request sent - effectively disabling timeout between requests - if sent == ht.upto { + if sent == upto { timeout = infiniteTimeout } resp, err := waitResult(mainCtx, resultCh, errorCh, timeout) @@ -140,6 +227,11 @@ func (ht 
*hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) switch { case resp.Resp != nil: resultIdx = resp.Index + if resultIdx == 0 { + ht.metrics.originalRequestWinsInc() + } else { + ht.metrics.hedgedRequestWinsInc() + } return resp.Resp, nil case mainCtx.Err() != nil: ht.metrics.canceledByUserRoundTripsInc() diff --git a/vendor/github.com/cristalhq/hedgedhttp/stats.go b/vendor/github.com/cristalhq/hedgedhttp/stats.go index fceeb234a22e5..f29331890826a 100644 --- a/vendor/github.com/cristalhq/hedgedhttp/stats.go +++ b/vendor/github.com/cristalhq/hedgedhttp/stats.go @@ -16,6 +16,8 @@ type Stats struct { requestedRoundTrips atomicCounter actualRoundTrips atomicCounter failedRoundTrips atomicCounter + originalRequestWins atomicCounter + hedgedRequestWins atomicCounter canceledByUserRoundTrips atomicCounter canceledSubRequests atomicCounter _ cacheLine @@ -24,6 +26,8 @@ type Stats struct { func (s *Stats) requestedRoundTripsInc() { atomic.AddUint64(&s.requestedRoundTrips.count, 1) } func (s *Stats) actualRoundTripsInc() { atomic.AddUint64(&s.actualRoundTrips.count, 1) } func (s *Stats) failedRoundTripsInc() { atomic.AddUint64(&s.failedRoundTrips.count, 1) } +func (s *Stats) originalRequestWinsInc() { atomic.AddUint64(&s.originalRequestWins.count, 1) } +func (s *Stats) hedgedRequestWinsInc() { atomic.AddUint64(&s.hedgedRequestWins.count, 1) } func (s *Stats) canceledByUserRoundTripsInc() { atomic.AddUint64(&s.canceledByUserRoundTrips.count, 1) } func (s *Stats) canceledSubRequestsInc() { atomic.AddUint64(&s.canceledSubRequests.count, 1) } @@ -42,6 +46,16 @@ func (s *Stats) FailedRoundTrips() uint64 { return atomic.LoadUint64(&s.failedRoundTrips.count) } +// OriginalRequestWins returns count of original requests that were faster than the original. +func (s *Stats) OriginalRequestWins() uint64 { + return atomic.LoadUint64(&s.originalRequestWins.count) +} + +// HedgedRequestWins returns count of hedged requests that were faster than the original. 
+func (s *Stats) HedgedRequestWins() uint64 { + return atomic.LoadUint64(&s.hedgedRequestWins.count) +} + // CanceledByUserRoundTrips returns count of requests that were canceled by user, using request context. func (s *Stats) CanceledByUserRoundTrips() uint64 { return atomic.LoadUint64(&s.canceledByUserRoundTrips.count) diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60ba9b3..0000000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 0000000000000..2bd78667afbb3 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f136bb..5566888726d98 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. 
+ +### Releasing + +Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f91501..3e9a61889de48 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). 
###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc90710..b2a0bc8711b3d 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207aeb6fd8..a56138cc4bd04 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") diff --git a/vendor/github.com/grafana/dskit/concurrency/buffer.go b/vendor/github.com/grafana/dskit/concurrency/buffer.go index 623b9a707612f..b8da4423f10e8 100644 --- a/vendor/github.com/grafana/dskit/concurrency/buffer.go +++ b/vendor/github.com/grafana/dskit/concurrency/buffer.go @@ -24,3 +24,10 @@ func (sb *SyncBuffer) String() string { return sb.buf.String() } + +func (sb *SyncBuffer) Reset() { + sb.mu.Lock() + defer sb.mu.Unlock() + + sb.buf.Reset() +} diff --git a/vendor/github.com/grafana/dskit/concurrency/worker.go b/vendor/github.com/grafana/dskit/concurrency/worker.go new file mode 
100644 index 0000000000000..f40f0334800b7 --- /dev/null +++ b/vendor/github.com/grafana/dskit/concurrency/worker.go @@ -0,0 +1,38 @@ +package concurrency + +// NewReusableGoroutinesPool creates a new worker pool with the given size. +// These workers will run the workloads passed through Go() calls. +// If all workers are busy, Go() will spawn a new goroutine to run the workload. +func NewReusableGoroutinesPool(size int) *ReusableGoroutinesPool { + p := &ReusableGoroutinesPool{ + jobs: make(chan func()), + } + for i := 0; i < size; i++ { + go func() { + for f := range p.jobs { + f() + } + }() + } + return p +} + +type ReusableGoroutinesPool struct { + jobs chan func() +} + +// Go will run the given function in a worker of the pool. +// If all workers are busy, Go() will spawn a new goroutine to run the workload. +func (p *ReusableGoroutinesPool) Go(f func()) { + select { + case p.jobs <- f: + default: + go f() + } +} + +// Close stops the workers of the pool. +// No new Do() calls should be performed after calling Close(). +// Close does NOT wait for all jobs to finish, it is the caller's responsibility to ensure that in the provided workloads. +// Close is intended to be used in tests to ensure that no goroutines are leaked. 
+func (p *ReusableGoroutinesPool) Close() { close(p.jobs) } diff --git a/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go b/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go index 4a10ce48d27a8..280f02180c3e9 100644 --- a/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go +++ b/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go @@ -9,14 +9,14 @@ import ( "github.com/grafana/dskit/middleware" ) -func Instrument(requestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { +func Instrument(requestDuration *prometheus.HistogramVec, instrumentationLabelOptions ...middleware.InstrumentationOption) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { return []grpc.UnaryClientInterceptor{ otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), middleware.ClientUserHeaderInterceptor, - middleware.UnaryClientInstrumentInterceptor(requestDuration), + middleware.UnaryClientInstrumentInterceptor(requestDuration, instrumentationLabelOptions...), }, []grpc.StreamClientInterceptor{ otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), middleware.StreamClientUserHeaderInterceptor, - middleware.StreamClientInstrumentInterceptor(requestDuration), + middleware.StreamClientInstrumentInterceptor(requestDuration, instrumentationLabelOptions...), } } diff --git a/vendor/github.com/grafana/dskit/grpcutil/cancel.go b/vendor/github.com/grafana/dskit/grpcutil/cancel.go deleted file mode 100644 index b1d369d2a3ea8..0000000000000 --- a/vendor/github.com/grafana/dskit/grpcutil/cancel.go +++ /dev/null @@ -1,25 +0,0 @@ -// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/grpc/cancel.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Weaveworks Ltd. 
- -package grpcutil - -import ( - "context" - "errors" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// IsCanceled checks whether an error comes from an operation being canceled -func IsCanceled(err error) bool { - if errors.Is(err, context.Canceled) { - return true - } - s, ok := status.FromError(err) - if ok && s.Code() == codes.Canceled { - return true - } - return false -} diff --git a/vendor/github.com/grafana/dskit/grpcutil/status.go b/vendor/github.com/grafana/dskit/grpcutil/status.go new file mode 100644 index 0000000000000..a9e9aab249a34 --- /dev/null +++ b/vendor/github.com/grafana/dskit/grpcutil/status.go @@ -0,0 +1,70 @@ +package grpcutil + +import ( + "context" + "errors" + + "github.com/gogo/status" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" +) + +// ErrorToStatus returns a *github.com/gogo/status.Status representation of err. +// +// - If err implements the method `GRPCStatus() *google.golang.org/grpc/status.Status` and +// `GRPCStatus()` does not return nil, or if err wraps a type satisfying this, Status from +// `GRPCStatus()` is converted to gogo Status, and returned. In that case, ok is true. +// +// - If err is or GRPCStatus() returns nil, a nil Status is returned and ok is false. +// +// - Otherwise, err is an error not compatible with this function. In this +// case, a nil Status is returned and ok is false. +func ErrorToStatus(err error) (*status.Status, bool) { + if err == nil { + return nil, false + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st == nil { + return nil, false + } + return status.FromGRPCStatus(st), true + } + return nil, false +} + +// ErrorToStatusCode extracts gRPC status code from error and returns it. +// +// - If err is nil, codes.OK is returned. 
+// +// - If err implements (or wraps error that implements) the method +// `GRPCStatus() *google.golang.org/grpc/status.Status`, and +// `GRPCStatus()` returns a non-nil status, code from the status +// is returned. +// +// - Otherwise code.Unknown is returned. +func ErrorToStatusCode(err error) codes.Code { + if err == nil { + return codes.OK + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st != nil { + return st.Code() + } + } + return codes.Unknown +} + +// IsCanceled checks whether an error comes from an operation being canceled. +func IsCanceled(err error) bool { + if errors.Is(err, context.Canceled) { + return true + } + statusCode := ErrorToStatusCode(err) + return statusCode == codes.Canceled +} diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go index 3012edd422ba6..e1f044d8650bb 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go @@ -5,19 +5,105 @@ package httpgrpc import ( + "bytes" "context" "fmt" + "io" + "net/http" "github.com/go-kit/log/level" - "google.golang.org/grpc/metadata" - spb "github.com/gogo/googleapis/google/rpc" "github.com/gogo/protobuf/types" "github.com/gogo/status" + "google.golang.org/grpc/metadata" + "github.com/grafana/dskit/grpcutil" "github.com/grafana/dskit/log" ) +const ( + MetadataMethod = "httpgrpc-method" + MetadataURL = "httpgrpc-url" +) + +// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata. 
+func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context { + return metadata.AppendToOutgoingContext(ctx, + MetadataMethod, req.Method, + MetadataURL, req.Url) +} + +type nopCloser struct { + *bytes.Buffer +} + +func (nopCloser) Close() error { return nil } + +// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser. +func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } + +// FromHTTPRequest converts an ordinary http.Request into an httpgrpc.HTTPRequest +func FromHTTPRequest(r *http.Request) (*HTTPRequest, error) { + body, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + return &HTTPRequest{ + Method: r.Method, + Url: r.RequestURI, + Body: body, + Headers: FromHeader(r.Header), + }, nil +} + +// ToHTTPRequest converts httpgrpc.HTTPRequest to http.Request. +func ToHTTPRequest(ctx context.Context, r *HTTPRequest) (*http.Request, error) { + req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) + if err != nil { + return nil, err + } + ToHeader(r.Headers, req.Header) + req = req.WithContext(ctx) + req.RequestURI = r.Url + req.ContentLength = int64(len(r.Body)) + return req, nil +} + +// WriteResponse converts an httpgrpc response to an HTTP one +func WriteResponse(w http.ResponseWriter, resp *HTTPResponse) error { + ToHeader(resp.Headers, w.Header()) + w.WriteHeader(int(resp.Code)) + _, err := w.Write(resp.Body) + return err +} + +// WriteError converts an httpgrpc error to an HTTP one +func WriteError(w http.ResponseWriter, err error) { + resp, ok := HTTPResponseFromError(err) + if ok { + _ = WriteResponse(w, resp) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func ToHeader(hs []*Header, header http.Header) { + for _, h := range hs { + header[h.Key] = h.Values + } +} + +func FromHeader(hs http.Header) []*Header { + result := make([]*Header, 0, len(hs)) + for k, vs := range hs { + result = 
append(result, &Header{ + Key: k, + Values: vs, + }) + } + return result +} + // Errorf returns a HTTP gRPC error than is correctly forwarded over // gRPC, and can eventually be converted back to a HTTP response with // HTTPResponseFromError. @@ -44,7 +130,7 @@ func ErrorFromHTTPResponse(resp *HTTPResponse) error { // HTTPResponseFromError converts a grpc error into an HTTP response func HTTPResponseFromError(err error) (*HTTPResponse, bool) { - s, ok := status.FromError(err) + s, ok := grpcutil.ErrorToStatus(err) if !ok { return nil, false } @@ -62,15 +148,3 @@ func HTTPResponseFromError(err error) (*HTTPResponse, bool) { return &resp, true } - -const ( - MetadataMethod = "httpgrpc-method" - MetadataURL = "httpgrpc-url" -) - -// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata. -func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context { - return metadata.AppendToOutgoingContext(ctx, - MetadataMethod, req.Method, - MetadataURL, req.Url) -} diff --git a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go index b0d808b7b75a1..c642f7fa13fda 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go @@ -5,10 +5,8 @@ package server import ( - "bytes" "context" "fmt" - "io" "net" "net/http" "net/http/httptest" @@ -27,6 +25,13 @@ import ( "github.com/grafana/dskit/middleware" ) +var ( + // DoNotLogErrorHeaderKey is a header key used for marking non-loggable errors. More precisely, if an HTTP response + // has a status code 5xx, and contains a header with key DoNotLogErrorHeaderKey and any values, the generated error + // will be marked as non-loggable. + DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError") +) + // Server implements HTTPServer. HTTPServer is a generated interface that gRPC // servers must implement. 
type Server struct { @@ -40,35 +45,34 @@ func NewServer(handler http.Handler) *Server { } } -type nopCloser struct { - *bytes.Buffer -} - -func (nopCloser) Close() error { return nil } - -// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser. -func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } - // Handle implements HTTPServer. func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) + req, err := httpgrpc.ToHTTPRequest(ctx, r) if err != nil { return nil, err } - toHeader(r.Headers, req.Header) - req = req.WithContext(ctx) - req.RequestURI = r.Url - req.ContentLength = int64(len(r.Body)) recorder := httptest.NewRecorder() s.handler.ServeHTTP(recorder, req) + header := recorder.Header() + + doNotLogError := false + if _, ok := header[DoNotLogErrorHeaderKey]; ok { + doNotLogError = true + header.Del(DoNotLogErrorHeaderKey) // remove before converting to httpgrpc resp + } + resp := &httpgrpc.HTTPResponse{ Code: int32(recorder.Code), - Headers: fromHeader(recorder.Header()), + Headers: httpgrpc.FromHeader(header), Body: recorder.Body.Bytes(), } if recorder.Code/100 == 5 { - return nil, httpgrpc.ErrorFromHTTPResponse(resp) + err := httpgrpc.ErrorFromHTTPResponse(resp) + if doNotLogError { + err = middleware.DoNotLogError{Err: err} + } + return nil, err } return resp, nil } @@ -153,38 +157,6 @@ func NewClient(address string) (*Client, error) { }, nil } -// HTTPRequest wraps an ordinary HTTPRequest with a gRPC one -func HTTPRequest(r *http.Request) (*httpgrpc.HTTPRequest, error) { - body, err := io.ReadAll(r.Body) - if err != nil { - return nil, err - } - return &httpgrpc.HTTPRequest{ - Method: r.Method, - Url: r.RequestURI, - Body: body, - Headers: fromHeader(r.Header), - }, nil -} - -// WriteResponse converts an httpgrpc response to an HTTP one -func WriteResponse(w http.ResponseWriter, 
resp *httpgrpc.HTTPResponse) error { - toHeader(resp.Headers, w.Header()) - w.WriteHeader(int(resp.Code)) - _, err := w.Write(resp.Body) - return err -} - -// WriteError converts an httpgrpc error to an HTTP one -func WriteError(w http.ResponseWriter, err error) { - resp, ok := httpgrpc.HTTPResponseFromError(err) - if ok { - _ = WriteResponse(w, resp) - } else { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - // ServeHTTP implements http.Handler func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { if tracer := opentracing.GlobalTracer(); tracer != nil { @@ -195,7 +167,7 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - req, err := HTTPRequest(r) + req, err := httpgrpc.FromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -212,25 +184,8 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - if err := WriteResponse(w, resp); err != nil { + if err := httpgrpc.WriteResponse(w, resp); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } - -func toHeader(hs []*httpgrpc.Header, header http.Header) { - for _, h := range hs { - header[h.Key] = h.Values - } -} - -func fromHeader(hs http.Header) []*httpgrpc.Header { - result := make([]*httpgrpc.Header, 0, len(hs)) - for k, vs := range hs { - result = append(result, &httpgrpc.Header{ - Key: k, - Values: vs, - }) - } - return result -} diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index 30a27531fd08d..693964b5ad067 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -222,7 +222,7 @@ func generateRandomSuffix(logger log.Logger) string { // If joining of the cluster if configured, it is done in Running state, and if join fails and Abort flag is set, service // 
fails. type KV struct { - services.Service + services.NamedService cfg KVConfig logger log.Logger @@ -374,7 +374,8 @@ func NewKV(cfg KVConfig, logger log.Logger, dnsProvider DNSProvider, registerer mlkv.codecs[c.CodecID()] = c } - mlkv.Service = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping) + mlkv.NamedService = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping).WithName("memberlist_kv") + return mlkv } @@ -485,17 +486,17 @@ func (m *KV) running(ctx context.Context) error { tickerChan = t.C } + logger := log.With(m.logger, "phase", "periodic_rejoin") for { select { case <-tickerChan: - members := m.discoverMembers(ctx, m.cfg.JoinMembers) - - reached, err := m.memberlist.Join(members) + const numAttempts = 1 // don't retry if resolution fails, we will try again next time + reached, err := m.joinMembersWithRetries(ctx, numAttempts, logger) if err == nil { - level.Info(m.logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) + level.Info(logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) } else { // Don't report error from rejoin, otherwise KV service would be stopped completely. - level.Warn(m.logger).Log("msg", "re-joining memberlist cluster failed", "err", err) + level.Warn(logger).Log("msg", "re-joining memberlist cluster failed", "err", err, "next_try_in", m.cfg.RejoinInterval) } case <-ctx.Done(): @@ -540,7 +541,7 @@ func (m *KV) fastJoinMembersOnStartup(ctx context.Context) { level.Info(m.logger).Log("msg", "memberlist fast-join starting", "nodes_found", len(nodes), "to_join", toJoin) totalJoined := 0 - for toJoin > 0 && len(nodes) > 0 { + for toJoin > 0 && len(nodes) > 0 && ctx.Err() == nil { reached, err := m.memberlist.Join(nodes[0:1]) // Try to join single node only. 
if err != nil { level.Debug(m.logger).Log("msg", "fast-joining node failed", "node", nodes[0], "err", err) @@ -568,41 +569,122 @@ func (m *KV) joinMembersOnStartup(ctx context.Context) bool { return true } + logger := log.With(m.logger, "phase", "startup") + level.Info(logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ",")) startTime := time.Now() + reached, err := m.joinMembersWithRetries(ctx, m.cfg.MaxJoinRetries, logger) + if err != nil { + level.Error(logger).Log("msg", "joining memberlist cluster failed", "err", err, "elapsed_time", time.Since(startTime)) + return false + } + level.Info(logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime)) + return true +} - level.Info(m.logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ",")) - - cfg := backoff.Config{ - MinBackoff: m.cfg.MinJoinBackoff, - MaxBackoff: m.cfg.MaxJoinBackoff, - MaxRetries: m.cfg.MaxJoinRetries, +// joinMembersWithRetries joins m.cfg.JoinMembers 100 at a time. After each batch of 100 it rediscoveres the members. +// This helps when the list of members is big and by the time we reach the end the originally resolved addresses may be obsolete. +// joinMembersWithRetries returns an error iff it couldn't successfully join any node OR the context was cancelled. +func (m *KV) joinMembersWithRetries(ctx context.Context, numAttempts int, logger log.Logger) (int, error) { + var ( + cfg = backoff.Config{ + MinBackoff: m.cfg.MinJoinBackoff, + MaxBackoff: m.cfg.MaxJoinBackoff, + MaxRetries: numAttempts, + } + boff = backoff.New(ctx, cfg) + err error + successfullyJoined = 0 + ) + + for ; boff.Ongoing(); boff.Wait() { + successfullyJoined, err = m.joinMembersInBatches(ctx) + if successfullyJoined > 0 { + // If there are _some_ successful joins, then we can consider the join done. 
+ // Mimicking the Join semantics we return an error only when we couldn't join any node at all + err = nil + break + } + level.Warn(logger).Log("msg", "joining memberlist cluster", "attempts", boff.NumRetries()+1, "max_attempts", numAttempts, "err", err) + } + if err == nil && boff.Err() != nil { + err = fmt.Errorf("joining memberlist: %w", boff.Err()) } - boff := backoff.New(ctx, cfg) - var lastErr error + return successfullyJoined, err +} - for boff.Ongoing() { - // We rejoin all nodes, including those that were joined during "fast-join". - // This is harmless and simpler. - nodes := m.discoverMembers(ctx, m.cfg.JoinMembers) +// joinMembersInBatches joins m.cfg.JoinMembers and re-resolves the address of m.cfg.JoinMembers after joining 100 nodes. +// joinMembersInBatches returns the number of nodes joined. joinMembersInBatches returns an error only when the +// number of joined nodes is 0. +func (m *KV) joinMembersInBatches(ctx context.Context) (int, error) { + const batchSize = 100 + var ( + attemptedNodes = make(map[string]bool) + successfullyJoined = 0 + lastErr error + batch = make([]string, batchSize) + nodes []string + ) + for moreAvailableNodes := true; ctx.Err() == nil && moreAvailableNodes; { + // Rediscover nodes and try to join a subset of them with each batch. + // When the list of nodes is large by the time we reach the end of the list some of the + // IPs can be unreachable. + newlyResolved := m.discoverMembers(ctx, m.cfg.JoinMembers) + if len(newlyResolved) > 0 { + // If the resolution fails we keep using the nodes list from the last resolution. + // If that failed too, then we fail the join attempt. + nodes = newlyResolved + } - if len(nodes) > 0 { - reached, err := m.memberlist.Join(nodes) // err is only returned if reached==0. 
- if err == nil { - level.Info(m.logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime)) - return true + // Prepare batch + batch = batch[:0] + moreAvailableNodes = false + for _, n := range nodes { + if attemptedNodes[n] { + continue } - level.Warn(m.logger).Log("msg", "joining memberlist cluster: failed to reach any nodes", "retries", boff.NumRetries(), "err", err) - lastErr = err - } else { - level.Warn(m.logger).Log("msg", "joining memberlist cluster: found no nodes to join", "retries", boff.NumRetries()) + if len(batch) >= batchSize { + moreAvailableNodes = true + break + } + batch = append(batch, n) + attemptedNodes[n] = true } - boff.Wait() + // Join batch + joinedInBatch, err := m.joinMembersBatch(ctx, batch) + if err != nil { + lastErr = err + } + successfullyJoined += joinedInBatch + } + if successfullyJoined > 0 { + return successfullyJoined, nil + } + if successfullyJoined == 0 && lastErr == nil { + return 0, errors.New("found no nodes to join") } + return 0, lastErr +} - level.Error(m.logger).Log("msg", "joining memberlist cluster failed", "last_error", lastErr, "elapsed_time", time.Since(startTime)) - return false +// joinMembersBatch returns an error only if it couldn't successfully join any nodes or if ctx is cancelled. +func (m *KV) joinMembersBatch(ctx context.Context, nodes []string) (successfullyJoined int, lastErr error) { + for nodeIdx := range nodes { + if ctx.Err() != nil { + return successfullyJoined, fmt.Errorf("joining batch: %w", context.Cause(ctx)) + } + // Attempt to join a single node. + // The cost of calling Join shouldn't be different between passing all nodes in one invocation versus passing a single node per invocation. 
+ reached, err := m.memberlist.Join(nodes[nodeIdx : nodeIdx+1]) + successfullyJoined += reached + if err != nil { + lastErr = err + } + } + if successfullyJoined > 0 { + lastErr = nil + } + return successfullyJoined, lastErr } // Provides a dns-based member disovery to join a memberlist cluster w/o knowning members' addresses upfront. diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go index 70069fa36fadd..e4052b8ed05ff 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go @@ -6,6 +6,7 @@ package middleware import ( "context" + "errors" "io" "strconv" "time" @@ -13,72 +14,69 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "github.com/grafana/dskit/grpcutil" - "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/instrument" ) -func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration) { - respStatus := "success" - if err != nil { - if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { - respStatus = strconv.Itoa(int(errResp.Code)) - } else if grpcutil.IsCanceled(err) { - respStatus = "cancel" - } else { - respStatus = "error" - } - } - instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, respStatus, "false"), duration.Seconds()) +func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration, instrumentLabel instrumentationLabel) { + instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, instrumentLabel.getInstrumentationLabel(err), "false"), duration.Seconds()) } // UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency. 
-func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { +func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryServerInterceptor { + instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...) return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { begin := time.Now() resp, err := handler(ctx, req) - observe(ctx, hist, info.FullMethod, err, time.Since(begin)) + observe(ctx, hist, info.FullMethod, err, time.Since(begin), instrumentationLabel) return resp, err } } // StreamServerInstrumentInterceptor instruments gRPC requests for errors and latency. -func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.StreamServerInterceptor { +func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamServerInterceptor { + instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...) return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { begin := time.Now() err := handler(srv, ss) - observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin)) + observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin), instrumentationLabel) return err } } // UnaryClientInstrumentInterceptor records duration of gRPC requests client side. -func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.UnaryClientInterceptor { +func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryClientInterceptor { + // we enforce masking of HTTP statuses. + instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...) 
return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { start := time.Now() err := invoker(ctx, method, req, resp, cc, opts...) - metric.WithLabelValues(method, errorCode(err)).Observe(time.Since(start).Seconds()) + metric.WithLabelValues(method, instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(start).Seconds()) return err } } // StreamClientInstrumentInterceptor records duration of streaming gRPC requests client side. -func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.StreamClientInterceptor { +func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamClientInterceptor { + // we enforce masking of HTTP statuses. + instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...) return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption, ) (grpc.ClientStream, error) { start := time.Now() stream, err := streamer(ctx, desc, cc, method, opts...) s := &instrumentedClientStream{ - metric: metric, - start: start, - method: method, - serverStreams: desc.ServerStreams, - finished: atomic.NewBool(false), - finishedChan: make(chan struct{}), - stream: stream, + metric: metric, + start: start, + method: method, + serverStreams: desc.ServerStreams, + finished: atomic.NewBool(false), + finishedChan: make(chan struct{}), + stream: stream, + instrumentationLabel: instrumentationLabel, } s.awaitCompletion(ctx) return s, err @@ -87,13 +85,14 @@ func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.Str // This implementation is heavily inspired by github.com/opentracing-contrib/go-grpc's openTracingClientStream. 
type instrumentedClientStream struct { - metric *prometheus.HistogramVec - start time.Time - method string - serverStreams bool - finished *atomic.Bool - finishedChan chan struct{} - stream grpc.ClientStream + metric *prometheus.HistogramVec + start time.Time + method string + serverStreams bool + finished *atomic.Bool + finishedChan chan struct{} + stream grpc.ClientStream + instrumentationLabel instrumentationLabel } func (s *instrumentedClientStream) Trailer() metadata.MD { @@ -122,7 +121,7 @@ func (s *instrumentedClientStream) finish(err error) { close(s.finishedChan) - s.metric.WithLabelValues(s.method, errorCode(err)).Observe(time.Since(s.start).Seconds()) + s.metric.WithLabelValues(s.method, s.instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(s.start).Seconds()) } func (s *instrumentedClientStream) SendMsg(m interface{}) error { @@ -173,18 +172,75 @@ func (s *instrumentedClientStream) CloseSend() error { return err } -// errorCode converts an error into an error code string. -func errorCode(err error) string { - if err == nil { - return "2xx" +type InstrumentationOption func(*instrumentationLabel) + +var ( + // ReportGRPCStatusOption is an InstrumentationOption that is used for enabling gRPC status codes to be used + // in instrumentation labels. + ReportGRPCStatusOption InstrumentationOption = func(instrumentationLabel *instrumentationLabel) { + instrumentationLabel.reportGRPCStatus = true + } +) + +func applyInstrumentationOptions(maskHTTPStatuses bool, options ...InstrumentationOption) instrumentationLabel { + instrumentationLabel := instrumentationLabel{maskHTTPStatus: maskHTTPStatuses} + for _, opt := range options { + opt(&instrumentationLabel) + } + return instrumentationLabel +} + +type instrumentationLabel struct { + reportGRPCStatus bool + maskHTTPStatus bool +} + +// getInstrumentationLabel converts an error into an error code string by applying the configurations +// contained in this instrumentationLabel object. 
+func (i *instrumentationLabel) getInstrumentationLabel(err error) string { + statusCode := errorToStatusCode(err) + return i.statusCodeToString(statusCode) +} + +func (i *instrumentationLabel) statusCodeToString(statusCode codes.Code) string { + if isHTTPStatusCode(statusCode) { + statusFamily := int(statusCode / 100) + if i.maskHTTPStatus { + return strconv.Itoa(statusFamily) + "xx" + } + return strconv.Itoa(int(statusCode)) } - if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { - statusFamily := int(errResp.Code / 100) - return strconv.Itoa(statusFamily) + "xx" - } else if grpcutil.IsCanceled(err) { + if i.reportGRPCStatus { + return statusCode.String() + } + + if statusCode == codes.OK { + if i.maskHTTPStatus { + return "2xx" + } + return "success" + } + + if statusCode == codes.Canceled { return "cancel" - } else { - return "error" } + + return "error" +} + +func errorToStatusCode(err error) codes.Code { + if err == nil { + return codes.OK + } + + if errors.Is(err, context.Canceled) { + return codes.Canceled + } + + return grpcutil.ErrorToStatusCode(err) +} + +func isHTTPStatusCode(statusCode codes.Code) bool { + return int(statusCode) >= 100 && int(statusCode) < 600 } diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go index 7f5db7725c945..feab364743225 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go @@ -29,6 +29,12 @@ type OptionalLogging interface { ShouldLog(ctx context.Context, duration time.Duration) bool } +type DoNotLogError struct{ Err error } + +func (i DoNotLogError) Error() string { return i.Err.Error() } +func (i DoNotLogError) Unwrap() error { return i.Err } +func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false } + // GRPCServerLog logs grpc requests, errors, and latency. 
type GRPCServerLog struct { Log log.Logger diff --git a/vendor/github.com/grafana/dskit/middleware/zero_response.go b/vendor/github.com/grafana/dskit/middleware/zero_response.go new file mode 100644 index 0000000000000..1bb4ecc8d1f6b --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/zero_response.go @@ -0,0 +1,132 @@ +package middleware + +import ( + "errors" + "net" + "os" + "regexp" + "strconv" + "sync" + + "github.com/go-kit/log" + "go.uber.org/atomic" +) + +// NewZeroResponseListener returns a Listener that logs all connections that encountered io timeout on reads, and were closed before sending any response. +func NewZeroResponseListener(list net.Listener, log log.Logger) net.Listener { + return &zeroResponseListener{ + Listener: list, + log: log, + bufPool: sync.Pool{ + New: func() interface{} { return &bufHolder{buf: make([]byte, 0, requestBufSize)} }, + }, + } +} + +// Wrap a slice in a struct, so we can store a pointer in sync.Pool +type bufHolder struct { + buf []byte +} + +// Size of buffer for read data. We log this eventually. +const requestBufSize = 512 + +type zeroResponseListener struct { + net.Listener + log log.Logger + bufPool sync.Pool // pool of &bufHolder. +} + +func (zl *zeroResponseListener) Accept() (net.Conn, error) { + conn, err := zl.Listener.Accept() + if err != nil { + return nil, err + } + bh := zl.bufPool.Get().(*bufHolder) + bh.buf = bh.buf[:0] + return &zeroResponseConn{Conn: conn, log: zl.log, bufHolder: bh, returnPool: &zl.bufPool}, nil +} + +type zeroResponseConn struct { + net.Conn + + log log.Logger + once sync.Once + returnPool *sync.Pool + + bufHolderMux sync.Mutex + bufHolder *bufHolder // Buffer with first requestBufSize bytes from connection. Set to nil as soon as data is written to the connection. 
+ + lastReadErrIsDeadlineExceeded atomic.Bool +} + +func (zc *zeroResponseConn) Read(b []byte) (n int, err error) { + n, err = zc.Conn.Read(b) + if err != nil && errors.Is(err, os.ErrDeadlineExceeded) { + zc.lastReadErrIsDeadlineExceeded.Store(true) + } else { + zc.lastReadErrIsDeadlineExceeded.Store(false) + } + + // Store first requestBufSize read bytes on connection into the buffer for logging. + if n > 0 { + zc.bufHolderMux.Lock() + defer zc.bufHolderMux.Unlock() + + if zc.bufHolder != nil { + rem := requestBufSize - len(zc.bufHolder.buf) // how much space is in our buffer. + if rem > n { + rem = n + } + if rem > 0 { + zc.bufHolder.buf = append(zc.bufHolder.buf, b[:rem]...) + } + } + } + return +} + +func (zc *zeroResponseConn) Write(b []byte) (n int, err error) { + n, err = zc.Conn.Write(b) + if n > 0 { + zc.bufHolderMux.Lock() + if zc.bufHolder != nil { + zc.returnPool.Put(zc.bufHolder) + zc.bufHolder = nil + } + zc.bufHolderMux.Unlock() + } + return +} + +var authRegexp = regexp.MustCompile(`((?i)\r\nauthorization:\s+)(\S+\s+)(\S+)`) + +func (zc *zeroResponseConn) Close() error { + err := zc.Conn.Close() + + zc.once.Do(func() { + zc.bufHolderMux.Lock() + defer zc.bufHolderMux.Unlock() + + // If buffer was already returned, it means there was some data written on the connection, nothing to do. + if zc.bufHolder == nil { + return + } + + // If we didn't write anything to this connection, and we've got timeout while reading data, it looks like + // slow a slow client failing to send a request to us. + if !zc.lastReadErrIsDeadlineExceeded.Load() { + return + } + + b := zc.bufHolder.buf + b = authRegexp.ReplaceAll(b, []byte("${1}${2}***")) // Replace value in Authorization header with ***. 
+ + _ = zc.log.Log("msg", "read timeout, connection closed with no response", "read", strconv.Quote(string(b)), "remote", zc.RemoteAddr().String()) + + zc.returnPool.Put(zc.bufHolder) + zc.bufHolder = nil + }) + + return err +} diff --git a/vendor/github.com/grafana/dskit/modules/module_service.go b/vendor/github.com/grafana/dskit/modules/module_service.go index 8ca4e25714de4..a0fcdb876fcde 100644 --- a/vendor/github.com/grafana/dskit/modules/module_service.go +++ b/vendor/github.com/grafana/dskit/modules/module_service.go @@ -79,13 +79,19 @@ func (w *moduleService) start(serviceContext context.Context) error { // we don't want to let this service to stop until all dependant services are stopped, // so we use independent context here - level.Info(w.logger).Log("msg", "initialising", "module", w.name) + level.Info(w.logger).Log("msg", "starting", "module", w.name) err := w.service.StartAsync(context.Background()) if err != nil { return errors.Wrapf(err, "error starting module: %s", w.name) } - return w.service.AwaitRunning(serviceContext) + err = w.service.AwaitRunning(serviceContext) + if err != nil { + // Make sure that underlying service is stopped before returning + // (e.g. in case of context cancellation, AwaitRunning returns early, but service may still be starting). 
+ _ = services.StopAndAwaitTerminated(context.Background(), w.service) + } + return errors.Wrapf(err, "starting module %s", w.name) } func (w *moduleService) run(serviceContext context.Context) error { diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go index fa627445ed2c6..5acd8fd008620 100644 --- a/vendor/github.com/grafana/dskit/ring/batch.go +++ b/vendor/github.com/grafana/dskit/ring/batch.go @@ -8,7 +8,8 @@ import ( "sync" "go.uber.org/atomic" - "google.golang.org/grpc/status" + + grpcUtils "github.com/grafana/dskit/grpcutil" ) type batchTracker struct { @@ -25,40 +26,79 @@ type instance struct { } type itemTracker struct { - minSuccess int - maxFailures int - succeeded atomic.Int32 - failed4xx atomic.Int32 - failed5xx atomic.Int32 - remaining atomic.Int32 - err atomic.Error + minSuccess int + maxFailures int + succeeded atomic.Int32 + failedClient atomic.Int32 + failedServer atomic.Int32 + remaining atomic.Int32 + err atomic.Error } -func (i *itemTracker) recordError(err error) int32 { +func (i *itemTracker) recordError(err error, isClientError func(error) bool) int32 { i.err.Store(err) - if s, ok := status.FromError(err); ok && s.Code()/100 == 4 { - return i.failed4xx.Inc() + if isClientError(err) { + return i.failedClient.Inc() } + return i.failedServer.Inc() +} - return i.failed5xx.Inc() +func isHTTPStatus4xx(err error) bool { + code := grpcUtils.ErrorToStatusCode(err) + return code/100 == 4 } -// DoBatch request against a set of keys in the ring, handling replication and -// failures. For example if we want to write N items where they may all -// hit different instances, and we want them all replicated R ways with -// quorum writes, we track the relationship between batch RPCs and the items -// within them. -// -// Callback is passed the instance to target, and the indexes of the keys -// to send to that instance. 
+// DoBatch is a deprecated version of DoBatchWithOptions where grpc errors containing status codes 4xx are treated as client errors. +// Deprecated. Use DoBatchWithOptions instead. +func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { + return DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{ + Cleanup: cleanup, + IsClientError: isHTTPStatus4xx, + }) +} + +// DoBatchOptions defines options for the DoBatchWithOptions call. +// Zero value options are valid, as well as individual zero valued fields. +type DoBatchOptions struct { + // Cleanup is always called, either on an error before starting the batches or after they are all finished. + // If nil, a noop will be called. + Cleanup func() + + // IsClientError classifies errors returned by `callback()` into client or server errors. + // See `batchTracker.record()` function for details about how errors are combined into final error returned by DoBatchWithClientError. + // If nil, a default implementation is used that classifies grpc errors containing status codes 4xx as client errors. + IsClientError func(error) bool + + // Go will be used to spawn the callback goroutines, and can be used to use a worker pool like concurrency.ReusableGoroutinesPool. + Go func(func()) +} + +func (o *DoBatchOptions) replaceZeroValuesWithDefaults() { + if o.Cleanup == nil { + o.Cleanup = func() {} + } + if o.IsClientError == nil { + o.IsClientError = isHTTPStatus4xx + } + if o.Go == nil { + o.Go = func(f func()) { go f() } + } +} + +// DoBatchWithOptions request against a set of keys in the ring, handling replication and failures. +// For example if we want to write N items where they may all hit different instances, +// and we want them all replicated R ways with quorum writes, +// we track the relationship between batch RPCs and the items within them. 
// -// cleanup() is always called, either on an error before starting the batches or after they all finish. +// See comments on DoBatchOptions for available options for this call. // -// Not implemented as a method on Ring so we can test separately. -func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { +// Not implemented as a method on Ring, so we can test separately. +func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error { + o.replaceZeroValuesWithDefaults() + if r.InstancesCount() <= 0 { - cleanup() + o.Cleanup() return fmt.Errorf("DoBatch: InstancesCount <= 0") } expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount() @@ -73,7 +113,7 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb for i, key := range keys { replicationSet, err := r.Get(key, op, bufDescs[:0], bufHosts[:0], bufZones[:0]) if err != nil { - cleanup() + o.Cleanup() return err } itemTrackers[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors @@ -104,19 +144,19 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb wg.Add(len(instances)) for _, i := range instances { - go func(i instance) { + i := i + o.Go(func() { err := callback(i.desc, i.indexes) - tracker.record(i.itemTrackers, err) + tracker.record(i.itemTrackers, err, o.IsClientError) wg.Done() - }(i) + }) } // Perform cleanup at the end. 
- go func() { + o.Go(func() { wg.Wait() - - cleanup() - }() + o.Cleanup() + }) select { case err := <-tracker.err: @@ -128,35 +168,36 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb } } -func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { +func (b *batchTracker) record(itemTrackers []*itemTracker, err error, isClientError func(error) bool) { // If we reach the required number of successful puts on this item, then decrement the // number of pending items by one. // // The use of atomic increments here is needed as: // * rpcsPending and rpcsFailed guarantee only a single goroutine will write to either channel - // * succeeded, failed4xx, failed5xx and remaining guarantee that the "return decision" is made atomically + // * succeeded, failedClient, failedServer and remaining guarantee that the "return decision" is made atomically // avoiding race condition - for i := range itemTrackers { + for _, it := range itemTrackers { if err != nil { // Track the number of errors by error family, and if it exceeds maxFailures // shortcut the waiting rpc. - errCount := itemTrackers[i].recordError(err) + errCount := it.recordError(err, isClientError) // We should return an error if we reach the maxFailure (quorum) on a given error family OR - // we don't have any remaining instances to try. + // we don't have any remaining instances to try. In the following we use ClientError and ServerError + // to denote errors, for which isClientError() returns true and false respectively. // - // Ex: 2xx, 4xx, 5xx -> return 5xx - // Ex: 4xx, 4xx, _ -> return 4xx - // Ex: 5xx, _, 5xx -> return 5xx + // Ex: Success, ClientError, ServerError -> return ServerError + // Ex: ClientError, ClientError, Success -> return ClientError + // Ex: ServerError, Success, ServerError -> return ServerError // - // The reason for searching for quorum in 4xx and 5xx errors separately is to give a more accurate - // response to the initial request. 
So if a quorum of instances rejects the request with 4xx, then the request should be rejected - // even if less-than-quorum instances indicated a failure to process the request (via 5xx). + // The reason for searching for quorum in ClientError and ServerError errors separately is to give a more accurate + // response to the initial request. So if a quorum of instances rejects the request with ClientError, then the request should be rejected + // even if less-than-quorum instances indicated a failure to process the request (via ServerError). // The speculation is that had the unavailable instances been available, - // they would have rejected the request with a 4xx as well. - // Conversely, if a quorum of instances failed to process the request via 5xx and less-than-quorum - // instances rejected it with 4xx, then we do not have quorum to reject the request as a 4xx. Instead, - // we return the last 5xx error for debuggability. - if errCount > int32(itemTrackers[i].maxFailures) || itemTrackers[i].remaining.Dec() == 0 { + // they would have rejected the request with a ClientError as well. + // Conversely, if a quorum of instances failed to process the request via ServerError and less-than-quorum + // instances rejected it with ClientError, then we do not have quorum to reject the request as a ClientError. Instead, + // we return the last ServerError error for debuggability. + if errCount > int32(it.maxFailures) || it.remaining.Dec() == 0 { if b.rpcsFailed.Inc() == 1 { b.err <- err } @@ -164,7 +205,8 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { } else { // If we successfully process items in minSuccess instances, // then wake up the waiting rpc, so it can return early. 
- if itemTrackers[i].succeeded.Inc() >= int32(itemTrackers[i].minSuccess) { + succeeded := it.succeeded.Inc() + if succeeded == int32(it.minSuccess) { if b.rpcsPending.Dec() == 0 { b.done <- struct{}{} } @@ -172,11 +214,12 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { } // If we successfully called this particular instance, but we don't have any remaining instances to try, - // and we failed to call minSuccess instances, then we need to return the last error - // Ex: 4xx, 5xx, 2xx - if itemTrackers[i].remaining.Dec() == 0 { - if b.rpcsFailed.Inc() == 1 { - b.err <- itemTrackers[i].err.Load() + // and we failed to call minSuccess instances, then we need to return the last error. + if succeeded < int32(it.minSuccess) { + if it.remaining.Dec() == 0 { + if b.rpcsFailed.Inc() == 1 { + b.err <- it.err.Load() + } } } } diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index cc43331e44d95..f389f4766fc55 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -9,6 +9,7 @@ import ( kitlog "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/opentracing/opentracing-go/ext" "github.com/grafana/dskit/spanlogger" ) @@ -294,7 +295,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex terminate := func(err error) ([]T, error) { if cfg.Logger != nil { - _ = cfg.Logger.Error(err) + ext.Error.Set(cfg.Logger.Span, true) } contextTracker.cancelAllContexts() @@ -325,7 +326,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex resultsRemaining-- if result.err != nil && cfg.IsTerminalError != nil && cfg.IsTerminalError(result.err) { - level.Error(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err) + level.Warn(logger).Log("msg", "cancelling all outstanding requests because 
a terminal error occurred", "err", result.err) // We must return before calling resultTracker.done() below, otherwise done() might start further requests if request minimisation is enabled. return terminate(result.err) } diff --git a/vendor/github.com/grafana/dskit/ring/util.go b/vendor/github.com/grafana/dskit/ring/util.go index b5ee485ef25c6..a21c0f2fe2cad 100644 --- a/vendor/github.com/grafana/dskit/ring/util.go +++ b/vendor/github.com/grafana/dskit/ring/util.go @@ -7,6 +7,7 @@ import ( "time" "github.com/go-kit/log" + "golang.org/x/exp/slices" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/netutil" @@ -127,9 +128,11 @@ func getZones(tokens map[string][]uint32) []string { // searchToken returns the offset of the tokens entry holding the range for the provided key. func searchToken(tokens []uint32, key uint32) int { - i := sort.Search(len(tokens), func(x int) bool { - return tokens[x] > key - }) + i, found := slices.BinarySearch(tokens, key) + if found { + // we want the first token > key, not >= key + i = i + 1 + } if i >= len(tokens) { i = 0 } diff --git a/vendor/github.com/grafana/dskit/server/limits.go b/vendor/github.com/grafana/dskit/server/limits.go index 6b18bb1cb0c2c..4a8651e323abc 100644 --- a/vendor/github.com/grafana/dskit/server/limits.go +++ b/vendor/github.com/grafana/dskit/server/limits.go @@ -4,6 +4,7 @@ import ( "context" "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/tap" ) @@ -11,19 +12,15 @@ import ( type GrpcInflightMethodLimiter interface { // RPCCallStarting is called before request has been read into memory. // All that's known about the request at this point is grpc method name. + // + // Returned context is used during the remainder of the gRPC call. + // // Returned error should be convertible to gRPC Status via status.FromError, // otherwise gRPC-server implementation-specific error will be returned to the client (codes.PermissionDenied in grpc@v1.55.0). 
- RPCCallStarting(methodName string) error - RPCCallFinished(methodName string) -} - -// Custom type to hide it from other packages. -type grpcLimitCheckContextKey int + RPCCallStarting(ctx context.Context, methodName string, md metadata.MD) (context.Context, error) -// Presence of this key in the context indicates that inflight request counter was increased for this request, and needs to be decreased when request ends. -const ( - requestFullMethod grpcLimitCheckContextKey = 1 -) + RPCCallFinished(ctx context.Context) +} func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter) *grpcInflightLimitCheck { return &grpcInflightLimitCheck{ @@ -38,8 +35,8 @@ type grpcInflightLimitCheck struct { } // TapHandle is called after receiving grpc request and headers, but before reading any request data yet. -// If we reject request here, it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler). -// If we accept request (not return error), eventually HandleRPC with stats.End notification will be called. +// If we reject request here (by returning non-nil error), it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler). +// If we accept request (no error), eventually HandleRPC with stats.End notification will be called. func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) (context.Context, error) { if !isMethodNameValid(info.FullMethodName) { // If method name is not valid, we let the request continue, but not call method limiter. 
@@ -47,12 +44,7 @@ func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) return ctx, nil } - if err := g.methodLimiter.RPCCallStarting(info.FullMethodName); err != nil { - return ctx, err - } - - ctx = context.WithValue(ctx, requestFullMethod, info.FullMethodName) - return ctx, nil + return g.methodLimiter.RPCCallStarting(ctx, info.FullMethodName, info.Header) } func (g *grpcInflightLimitCheck) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { @@ -65,9 +57,7 @@ func (g *grpcInflightLimitCheck) HandleRPC(ctx context.Context, rpcStats stats.R return } - if name, ok := ctx.Value(requestFullMethod).(string); ok { - g.methodLimiter.RPCCallFinished(name) - } + g.methodLimiter.RPCCallFinished(ctx) } func (g *grpcInflightLimitCheck) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index 9e65b01053809..2b54283df7f21 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -92,15 +92,19 @@ type Config struct { HTTPTLSConfig TLSConfig `yaml:"http_tls_config"` GRPCTLSConfig TLSConfig `yaml:"grpc_tls_config"` - RegisterInstrumentation bool `yaml:"register_instrumentation"` - ExcludeRequestInLog bool `yaml:"-"` - DisableRequestSuccessLog bool `yaml:"-"` + RegisterInstrumentation bool `yaml:"register_instrumentation"` + ReportGRPCCodesInInstrumentationLabel bool `yaml:"report_grpc_codes_in_instrumentation_label_enabled"` + ExcludeRequestInLog bool `yaml:"-"` + DisableRequestSuccessLog bool `yaml:"-"` ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"` HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"` + HTTPServerReadHeaderTimeout time.Duration `yaml:"http_server_read_header_timeout"` HTTPServerWriteTimeout time.Duration `yaml:"http_server_write_timeout"` HTTPServerIdleTimeout time.Duration 
`yaml:"http_server_idle_timeout"` + HTTPLogClosedConnectionsWithoutResponse bool `yaml:"http_log_closed_connections_without_response_enabled"` + GRPCOptions []grpc.ServerOption `yaml:"-"` GRPCMiddleware []grpc.UnaryServerInterceptor `yaml:"-"` GRPCStreamMiddleware []grpc.StreamServerInterceptor `yaml:"-"` @@ -109,9 +113,9 @@ type Config struct { DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` RouteHTTPToGRPC bool `yaml:"-"` - GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` + GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` - GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` + GRPCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` @@ -167,13 +171,16 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.GRPCListenPort, "server.grpc-listen-port", 9095, "gRPC server listen port.") f.IntVar(&cfg.GRPCConnLimit, "server.grpc-conn-limit", 0, "Maximum number of simultaneous grpc connections, <=0 to disable") f.BoolVar(&cfg.RegisterInstrumentation, "server.register-instrumentation", true, "Register the intrumentation handlers (/metrics etc).") + f.BoolVar(&cfg.ReportGRPCCodesInInstrumentationLabel, "server.report-grpc-codes-in-instrumentation-label-enabled", false, "If set to true, gRPC statuses will be reported in instrumentation labels with their string representations. 
Otherwise, they will be reported as \"error\".") f.DurationVar(&cfg.ServerGracefulShutdownTimeout, "server.graceful-shutdown-timeout", 30*time.Second, "Timeout for graceful shutdowns") - f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for HTTP server") + f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for entire HTTP request, including headers and body.") + f.DurationVar(&cfg.HTTPServerReadHeaderTimeout, "server.http-read-header-timeout", 0, "Read timeout for HTTP request headers. If set to 0, value of -server.http-read-timeout is used.") f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 30*time.Second, "Write timeout for HTTP server") f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server") - f.IntVar(&cfg.GPRCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") + f.BoolVar(&cfg.HTTPLogClosedConnectionsWithoutResponse, "server.http-log-closed-connections-without-response-enabled", false, "Log closed connections that did not receive any response, most likely because client didn't send any request within timeout.") + f.IntVar(&cfg.GRPCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") f.IntVar(&cfg.GRPCServerMaxSendMsgSize, "server.grpc-max-send-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can send (bytes).") - f.UintVar(&cfg.GPRCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited)") + f.UintVar(&cfg.GRPCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection 
(0 = unlimited)") f.DurationVar(&cfg.GRPCServerMaxConnectionIdle, "server.grpc.keepalive.max-connection-idle", infinty, "The duration after which an idle connection should be closed. Default: infinity") f.DurationVar(&cfg.GRPCServerMaxConnectionAge, "server.grpc.keepalive.max-connection-age", infinty, "The duration for the maximum amount of time a connection may exist before it will be closed. Default: infinity") f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, "server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity") @@ -259,6 +266,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { return nil, err } httpListener = middleware.CountingListener(httpListener, metrics.TCPConnections.WithLabelValues("http")) + if cfg.HTTPLogClosedConnectionsWithoutResponse { + httpListener = middleware.NewZeroResponseListener(httpListener, level.Warn(logger)) + } metrics.TCPConnectionsLimit.WithLabelValues("http").Set(float64(cfg.HTTPConnLimit)) if cfg.HTTPConnLimit > 0 { @@ -346,17 +356,21 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { WithRequest: !cfg.ExcludeRequestInLog, DisableRequestSuccessLog: cfg.DisableRequestSuccessLog, } + var reportGRPCStatusesOptions []middleware.InstrumentationOption + if cfg.ReportGRPCCodesInInstrumentationLabel { + reportGRPCStatusesOptions = []middleware.InstrumentationOption{middleware.ReportGRPCStatusOption} + } grpcMiddleware := []grpc.UnaryServerInterceptor{ serverLog.UnaryServerInterceptor, otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), - middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration), + middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...) 
grpcStreamMiddleware := []grpc.StreamServerInterceptor{ serverLog.StreamServerInterceptor, otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()), - middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration), + middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) @@ -378,9 +392,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpc.ChainStreamInterceptor(grpcStreamMiddleware...), grpc.KeepaliveParams(grpcKeepAliveOptions), grpc.KeepaliveEnforcementPolicy(grpcKeepAliveEnforcementPolicy), - grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize), + grpc.MaxRecvMsgSize(cfg.GRPCServerMaxRecvMsgSize), grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize), - grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)), + grpc.MaxConcurrentStreams(uint32(cfg.GRPCServerMaxConcurrentStreams)), grpc.NumStreamWorkers(uint32(cfg.GRPCServerNumWorkers)), } @@ -457,10 +471,11 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { } httpServer := &http.Server{ - ReadTimeout: cfg.HTTPServerReadTimeout, - WriteTimeout: cfg.HTTPServerWriteTimeout, - IdleTimeout: cfg.HTTPServerIdleTimeout, - Handler: middleware.Merge(httpMiddleware...).Wrap(router), + ReadTimeout: cfg.HTTPServerReadTimeout, + ReadHeaderTimeout: cfg.HTTPServerReadHeaderTimeout, + WriteTimeout: cfg.HTTPServerWriteTimeout, + IdleTimeout: cfg.HTTPServerIdleTimeout, + Handler: middleware.Merge(httpMiddleware...).Wrap(router), } if httpTLSConfig != nil { httpServer.TLSConfig = httpTLSConfig diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b90ce50..e61587945b08f 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d2991c..9c79aa0a0cc5d 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d9acf4..d28140f789ec9 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index dbe2e2d0c6579..6ce01ac9a69c7 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/field_behavior.proto package annotations @@ -78,6 +78,19 @@ const ( // a non-empty value will be returned. The user will not be aware of what // non-empty value to expect. FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. 
For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 ) // Enum value maps for FieldBehavior. @@ -91,6 +104,7 @@ var ( 5: "IMMUTABLE", 6: "UNORDERED_LIST", 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -101,6 +115,7 @@ var ( "IMMUTABLE": 5, "UNORDERED_LIST": 6, "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, } ) @@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 
0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248cb470..ab0fbb79b863d 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d0fb9d..52d530d7ad01c 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go index b9b7219703876..7ea79410ad743 100644 --- a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go +++ b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go @@ -89,9 +89,9 @@ type LoggerConfig interface { // decision meets the condition for audit, all the configured audit loggers' // Log() method will be invoked to log that event. // -// TODO(lwge): Change the link to the merged gRFC once it's ready. -// Please refer to https://github.com/grpc/proposal/pull/346 for more details -// about audit logging. +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. type Logger interface { // Log performs audit logging for the provided audit event. // @@ -107,9 +107,9 @@ type Logger interface { // implement this interface, along with the Logger interface, and register // it by calling RegisterLoggerBuilder() at init time. // -// TODO(lwge): Change the link to the merged gRFC once it's ready. -// Please refer to https://github.com/grpc/proposal/pull/346 for more details -// about audit logging. +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. 
type LoggerBuilder interface { // ParseLoggerConfig parses the given JSON bytes into a structured // logger config this builder can use to build an audit logger. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f445ad24..d79560a2e268f 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. 
Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index f2ddfc3788ed9..86ba65be4c004 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -32,14 +32,18 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" durationpb "github.com/golang/protobuf/ptypes/duration" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" @@ -132,7 +136,11 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal // This generates a manual resolver builder with a fixed scheme. This // scheme will be used to dial to remote LB, so we can send filtered // address updates to remote LB ClientConn using this manual resolver. - r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} + mr := manual.NewBuilderWithScheme("grpclb-internal") + // ResolveNow() on this manual resolver is forwarded to the parent + // ClientConn, so when grpclb client loses contact with the remote balancer, + // the parent ClientConn's resolver will re-resolve. 
+ mr.ResolveNowCallback = cc.ResolveNow lb := &lbBalancer{ cc: newLBCacheClientConn(cc), @@ -142,23 +150,24 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), - manualResolver: r, + manualResolver: mr, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), clientStats: newRPCStats(), backoff: backoff.DefaultExponential, // TODO: make backoff configurable. } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[grpclb %p] ", lb)) var err error if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { - logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to grpclb: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { - logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to backends returned by grpclb: %v", err) } } @@ -170,6 +179,7 @@ type lbBalancer struct { dialTarget string // user's dial target target string // same as dialTarget unless overridden in service config opt balancer.BuildOptions + logger *internalgrpclog.PrefixLogger usePickFirst bool @@ -188,7 +198,7 @@ type lbBalancer struct { // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver + manualResolver *manual.Resolver // The ClientConn to talk to the remote balancer. 
ccRemoteLB *remoteBalancerCCWrapper // backoff for calling remote balancer. @@ -236,12 +246,12 @@ type lbBalancer struct { // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} + lb.picker = base.NewErrPicker(fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)) return } if lb.state == connectivity.Connecting { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } @@ -268,7 +278,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // // This doesn't seem to be necessary after the connecting check above. // Kept for safety. - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } if lb.inFallback { @@ -322,21 +332,21 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { // UpdateSubConnState is unused; NewSubConn's options always specifies // updateSubConnState as the listener. 
func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { - logger.Errorf("grpclb: UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) + lb.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) } func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState - if logger.V(2) { - logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { - if logger.V(2) { - logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) } return } @@ -441,8 +451,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { if lb.usePickFirst == newUsePickFirst { return } - if logger.V(2) { - logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + if lb.logger.V(2) { + lb.logger.Infof("Switching mode. Is pick_first used for backends? %v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } @@ -453,8 +463,8 @@ func (lb *lbBalancer) ResolverError(error) { } func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if logger.V(2) { - logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + if lb.logger.V(2) { + lb.logger.Infof("UpdateClientConnState: %s", pretty.ToJSON(ccs)) } gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) @@ -482,7 +492,9 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } else if lb.ccRemoteLB == nil { // First time receiving resolved addresses, create a cc to remote // balancers. 
- lb.newRemoteBalancerCCWrapper() + if err := lb.newRemoteBalancerCCWrapper(); err != nil { + return err + } // Start the fallback goroutine. go lb.fallbackToBackendsAfter(lb.fallbackTimeout) } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 39bc5cc71e819..20c5f2ec3967b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -98,15 +98,6 @@ func (s *rpcStats) knownReceived() { atomic.AddInt64(&s.numCallsFinished, 1) } -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} - // rrPicker does roundrobin on subConns. It's typically used when there's no // response from remote balancer, and grpclb falls back to the resolved // backends. diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index edb66a90a3b1b..c8fe1edd8e530 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -27,11 +27,8 @@ import ( "time" "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" @@ -39,13 +36,28 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" ) +func 
serverListEqual(a, b []*lbpb.Server) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !proto.Equal(a[i], b[i]) { + return false + } + } + return true +} + // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if logger.V(2) { - logger.Infof("lbBalancer: processing server list: %+v", l) + if lb.logger.V(2) { + lb.logger.Infof("Processing server list: %#v", l) } lb.mu.Lock() defer lb.mu.Unlock() @@ -55,9 +67,9 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { lb.serverListReceived = true // If the new server list == old server list, do nothing. - if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if logger.V(2) { - logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + if serverListEqual(lb.fullServerList, l.Servers) { + if lb.logger.V(2) { + lb.logger.Infof("Ignoring new server list as it is the same as the previous one") } return } @@ -78,9 +90,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { ipStr = fmt.Sprintf("[%s]", ipStr) } addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) - if logger.V(2) { - logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", - i, ipStr, s.Port, s.LoadBalanceToken) + if lb.logger.V(2) { + lb.logger.Infof("Server list entry:|%d|, ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) } @@ -149,7 +160,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // This bypasses the cc wrapper with SubConn cache. 
sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) return } sc.Connect() @@ -174,7 +185,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. @@ -217,7 +228,7 @@ type remoteBalancerCCWrapper struct { wg sync.WaitGroup } -func (lb *lbBalancer) newRemoteBalancerCCWrapper() { +func (lb *lbBalancer) newRemoteBalancerCCWrapper() error { var dopts []grpc.DialOption if creds := lb.opt.DialCreds; creds != nil { dopts = append(dopts, grpc.WithTransportCredentials(creds)) @@ -248,9 +259,10 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // // The grpclb server addresses will set field ServerName, and creds will // receive ServerName as authority. - cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) + target := lb.manualResolver.Scheme() + ":///grpclb.subClientConn" + cc, err := grpc.Dial(target, dopts...) 
if err != nil { - logger.Fatalf("failed to dial: %v", err) + return fmt.Errorf("grpc.Dial(%s): %v", target, err) } ccw := &remoteBalancerCCWrapper{ cc: cc, @@ -261,6 +273,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { lb.ccRemoteLB = ccw ccw.wg.Add(1) go ccw.watchRemoteBalancer() + return nil } // close closed the ClientConn to remote balancer, and waits until all @@ -408,9 +421,9 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { default: if err != nil { if err == errServerTerminatedConnection { - logger.Info(err) + ccw.lb.logger.Infof("Call to remote balancer failed: %v", err) } else { - logger.Warning(err) + ccw.lb.logger.Warningf("Call to remote balancer failed: %v", err) } } } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go index 680779f1c82eb..c0f762c0c050e 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -27,67 +27,6 @@ import ( "google.golang.org/grpc/resolver" ) -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). 
-// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. -type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// UpdateState calls cc.UpdateState. -func (r *lbManualResolver) UpdateState(s resolver.State) { - r.ccr.UpdateState(s) -} - const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. 
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ff7fea102288c..429c389e4730d 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -337,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() return nil } @@ -404,13 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { // name resolver, load balancer and any subchannels. func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { - cc.mu.Unlock() return ErrClientConnClosing } if cc.idlenessState != ccIdlenessStateActive { - channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() + channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) return nil } @@ -431,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { cc.balancerWrapper.enterIdleMode() cc.csMgr.updateState(connectivity.Idle) cc.idlenessState = ccIdlenessStateIdle - cc.mu.Unlock() + cc.addTraceEvent("entering idle mode") go func() { - cc.addTraceEvent("entering idle mode") for ac := range conns { ac.tearDown(errConnIdling) } }() + return nil } @@ -804,6 +804,12 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } + internal.EnterIdleModeForTesting = func(cc *ClientConn) error { + return cc.enterIdleMode() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.exitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go 
b/vendor/google.golang.org/grpc/dialoptions.go index 1fd0d5c127f4f..cfc9fd85e8dd9 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -644,6 +644,7 @@ func defaultDialOptions() dialOptions { UseProxy: true, }, recvBufferPool: nopBufferPool{}, + idleTimeout: 30 * time.Minute, } } @@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 69d5580b6adfd..5ebf88d7147f2 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. 
- // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d54bd5..4439cda0f3cb7 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -44,8 +44,15 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current @@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. 
If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da53bc..fed1c011a3259 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. 
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index 8177fb58da9aa..4cee66aeb6e69 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -328,6 +328,11 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer. // caching is disabled. if bg.outgoingStarted && bg.deletedBalancerCache != nil { if old, ok := bg.deletedBalancerCache.Remove(id); ok { + if bg.logger.V(2) { + bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + sbc, _ = old.(*subBalancerWrapper) if sbc != nil && sbc.builder != builder { // If the sub-balancer in cache was built with a different @@ -403,7 +408,7 @@ func (bg *BalancerGroup) Remove(id string) { sbToRemove, ok := bg.idToBalancerConfig[id] if !ok { - bg.logger.Infof("balancer group: trying to remove a non-existing locality from balancer group: %v", id) + bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id) bg.outgoingMu.Unlock() return } @@ -418,7 +423,17 @@ func (bg *BalancerGroup) Remove(id string) { } if bg.deletedBalancerCache != nil { + if bg.logger.V(2) { + bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id) + bg.logger.Infof("Number of items remaining in the 
balancer cache: %d", bg.deletedBalancerCache.Len()) + } + bg.deletedBalancerCache.Add(id, sbToRemove, func() { + if bg.logger.V(2) { + bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + // A sub-balancer evicted from the timeout cache needs to closed // and its subConns need to removed, unconditionally. There is a // possibility that a sub-balancer might be removed (thereby diff --git a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go index 3f2d47302c4e1..2fa48701023df 100644 --- a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go +++ b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go @@ -142,3 +142,10 @@ func (c *TimeoutCache) Clear(runCallback bool) { entry.callback() } } + +// Len returns the number of entries in the cache. +func (c *TimeoutCache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.cache) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c8a8c76d628ca..0d94c63e06e2f 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -175,6 +175,12 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) error + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 2f0417bd8db66..00f524a4809eb 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -23,6 +23,7 @@ package grpc_lookup_v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -98,6 +99,8 @@ type RouteLookupRequest struct { StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` // Map of key values extracted via key builders for the gRPC or HTTP request. KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Application-specific optional extensions. + Extensions []*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty"` } func (x *RouteLookupRequest) Reset() { @@ -160,6 +163,13 @@ func (x *RouteLookupRequest) GetKeyMap() map[string]string { return nil } +func (x *RouteLookupRequest) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + type RouteLookupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -173,6 +183,8 @@ type RouteLookupResponse struct { // Cached with "target" and sent with all requests that match the request key. // Allows the RLS to pass its work product to the eventual target. HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"` + // Application-specific optional extensions. 
+ Extensions []*anypb.Any `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty"` } func (x *RouteLookupResponse) Reset() { @@ -221,55 +233,70 @@ func (x *RouteLookupResponse) GetHeaderData() string { return "" } +func (x *RouteLookupResponse) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x83, 0x03, 0x0a, 0x12, 0x52, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 
0x74, 0x61, 0x12, 0x47, 0x0a, 0x07, + 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x22, 0x94, 0x01, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, + 
0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, - 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 
0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, - 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, - 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, - 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, - 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, - 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, - 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 
0x75, 0x70, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, + 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -291,17 +318,20 @@ var file_grpc_lookup_v1_rls_proto_goTypes = 
[]interface{}{ (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + (*anypb.Any)(nil), // 4: google.protobuf.Any } var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry - 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest - 2, // 3: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 4, // 2: grpc.lookup.v1.RouteLookupRequest.extensions:type_name -> google.protobuf.Any + 4, // 3: grpc.lookup.v1.RouteLookupResponse.extensions:type_name -> google.protobuf.Any + 1, // 4: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 5: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_grpc_lookup_v1_rls_proto_init() } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 4cf85cad9f810..03ef2fedd5cb5 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ 
b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,6 +43,34 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. 
func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa00aa..17f7a21b5a9f0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). 
+// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index badab8acf3b11..d6f5c49358b58 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1548,14 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server 
while stream was still active, // send RST_STREAM. rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index c06db679d89cc..6fa1eb41992a0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) for _, sh := range t.stats { s.ctx = sh.TagRPC(s.ctx, 
&stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ @@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (t *http2Server) HandleStreams(handle func(*Stream)) { defer close(t.readerDone) for { t.controlBuf.throttle() @@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(frame, handle); err != nil { t.Close(err) break } @@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 1958140082b35..dc29d590e91fb 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -34,12 +34,9 @@ import ( "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -88,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 74a811fc0590b..aac056e723bb5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -698,7 +698,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. 
diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go index 2d58725547fc0..04edae6de66f1 100644 --- a/vendor/google.golang.org/grpc/orca/producer.go +++ b/vendor/google.golang.org/grpc/orca/producer.go @@ -24,6 +24,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/orca/internal" "google.golang.org/grpc/status" @@ -169,48 +170,29 @@ func (p *producer) updateRunLocked() { func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { defer close(done) - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - return - } - + runStream := func() error { resetBackoff, err := p.runStream(ctx, interval) - - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(p.backoff(backoffAttempt)) - backoffAttempt++ - } - - switch { - case err == nil: - // No error was encountered; restart the stream. - case ctx.Err() != nil: - // Producer was stopped; exit immediately and without logging an - // error. - return - case status.Code(err) == codes.Unimplemented: + if status.Code(err) == codes.Unimplemented { // Unimplemented; do not retry. logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") - return - case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled: - // TODO: these codes should ideally log an error, too, but for now - // we receive them when shutting down the ClientConn (Unavailable - // if the stream hasn't started yet, and Canceled if it happens - // mid-stream). Once we can determine the state or ensure the - // producer is stopped before the stream ends, we can log an error - // when it's not a natural shutdown. 
- default: - // Log all other errors. + return err + } + // Retry for all other errors. + if code := status.Code(err); code != codes.Unavailable && code != codes.Canceled { + // TODO: Unavailable and Canceled should also ideally log an error, + // but for now we receive them when shutting down the ClientConn + // (Unavailable if the stream hasn't started yet, and Canceled if it + // happens mid-stream). Once we can determine the state or ensure + // the producer is stopped before the stream ends, we can log an + // error when it's not a natural shutdown. logger.Error("Received unexpected stream error:", err) } + if resetBackoff { + return backoff.ErrResetBackoff + } + return nil } + backoff.RunF(ctx, runStream, p.backoff) } // runStream runs a single stream on the subchannel and returns the resulting diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index e6b0f14cd941f..0a4262342f358 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -26,7 +26,9 @@ import ( "google.golang.org/grpc/resolver" ) -// NewBuilderWithScheme creates a new test resolver builder with the given scheme. +// NewBuilderWithScheme creates a new manual resolver builder with the given +// scheme. Every instance of the manual resolver may only ever be used with a +// single grpc.ClientConn. Otherwise, bad things will happen. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, @@ -58,30 +60,34 @@ type Resolver struct { scheme string // Fields actually belong to the resolver. - mu sync.Mutex // Guards access to CC. - CC resolver.ClientConn - bootstrapState *resolver.State + // Guards access to below fields. 
+ mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State } // InitialState adds initial state to the resolver so that UpdateState doesn't // need to be explicitly called after Dial. func (r *Resolver) InitialState(s resolver.State) { - r.bootstrapState = &s + r.lastSeenState = &s } // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) r.mu.Lock() r.CC = cc - r.mu.Unlock() - r.BuildCallback(target, cc, opts) - if r.bootstrapState != nil { - r.UpdateState(*r.bootstrapState) + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) } + r.mu.Unlock() return r, nil } -// Scheme returns the test scheme. +// Scheme returns the manual resolver's scheme. 
func (r *Resolver) Scheme() string { return r.scheme } @@ -100,6 +106,7 @@ func (r *Resolver) Close() { func (r *Resolver) UpdateState(s resolver.State) { r.mu.Lock() err := r.CC.UpdateState(s) + r.lastSeenState = &s r.mu.Unlock() r.UpdateStateCallback(err) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eeae92fbe0204..8f60d421437d9 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -983,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { f := func() { defer streamQuota.release() defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -995,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) wg.Wait() } @@ -1049,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.serveStreams(st) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1133,7 +1103,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1152,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1194,7 +1164,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err 
error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1208,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1240,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1262,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1348,7 +1317,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1362,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1370,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1395,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1403,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1418,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1445,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1460,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1479,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1521,7 +1490,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1535,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1574,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1616,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1694,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1712,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return 
t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.RemoteAddr(), + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } @@ -1748,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server 
unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1767,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40e4d1..07f0125768808 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 724ad21021300..6d2cadd79a9b9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.58.3" +const Version = "1.59.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bbc9e2e3c8e36..bb480f1f9cca1 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -93,6 +93,9 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index f8f749835c24f..074154a751be3 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -47,9 +47,8 @@ import ( ) const ( - c2pScheme = "google-c2p" - c2pExperimentalScheme = "google-c2p-experimental" - c2pAuthority = "traffic-director-c2p.xds.googleapis.com" + c2pScheme = "google-c2p" + c2pAuthority = "traffic-director-c2p.xds.googleapis.com" tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second @@ -77,18 +76,10 @@ var ( ) func init() { - resolver.Register(c2pResolverBuilder{ - scheme: c2pScheme, - }) - // TODO(apolcyn): remove this experimental scheme before the 1.52 release - resolver.Register(c2pResolverBuilder{ - scheme: c2pExperimentalScheme, - }) + resolver.Register(c2pResolverBuilder{}) } -type c2pResolverBuilder struct { - scheme string -} +type c2pResolverBuilder struct{} func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts 
resolver.BuildOptions) (resolver.Resolver, error) { if t.URL.Host != "" { @@ -165,7 +156,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts } func (b c2pResolverBuilder) Scheme() string { - return b.scheme + return c2pScheme } type c2pResolver struct { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 85a081d09df55..34c3592180750 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -18,8 +18,8 @@ package cdsbalancer import ( + "context" "encoding/json" - "errors" "fmt" "google.golang.org/grpc/balancer" @@ -28,7 +28,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/balancer/nop" - "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" @@ -42,11 +41,13 @@ import ( ) const ( - cdsName = "cds_experimental" + cdsName = "cds_experimental" + aggregateClusterMaxDepth = 16 ) var ( - errBalancerClosed = errors.New("cds_experimental LB policy is closed") + errBalancerClosed = fmt.Errorf("cds_experimental LB policy is closed") + errExceedsMaxDepth = fmt.Errorf("aggregate cluster graph exceeds max depth (%d)", aggregateClusterMaxDepth) // newChildBalancer is a helper function to build a new cluster_resolver // balancer and will be overridden in unittests. 
@@ -81,22 +82,29 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) } - crParser, ok := builder.(balancer.ConfigParser) + parser, ok := builder.(balancer.ConfigParser) if !ok { // Shouldn't happen, imported Cluster Resolver builder has this method. logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) } + + ctx, cancel := context.WithCancel(context.Background()) b := &cdsBalancer{ - bOpts: opts, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - crParser: crParser, - xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + bOpts: opts, + childConfigParser: parser, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + watchers: make(map[string]*watcherState), + } + b.ccw = &ccWrapper{ + ClientConn: cc, + xdsHI: b.xdsHI, } b.logger = prefixLogger((b)) b.logger.Infof("Created") + var creds credentials.TransportCredentials switch { case opts.DialCreds != nil: @@ -108,12 +116,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.xdsCredsInUse = true } b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) - b.clusterHandler = newClusterHandler(b) - b.ccw = &ccWrapper{ - ClientConn: cc, - xdsHI: b.xdsHI, - } - go b.run() return b } @@ -139,61 +141,45 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err return &cfg, nil } -// ccUpdate wraps a clientConn update received from gRPC (pushed from the -// xdsResolver). 
A valid clusterName causes the cdsBalancer to register a CDS -// watcher with the xdsClient, while a non-nil error causes it to cancel the -// existing watch and propagate the error to the underlying cluster_resolver -// balancer. -type ccUpdate struct { - clusterName string - err error -} - -type exitIdle struct{} - // cdsBalancer implements a CDS based LB policy. It instantiates a // cluster_resolver balancer to further resolve the serviceName received from // CDS, into localities and endpoints. Implements the balancer.Balancer // interface which is exposed to gRPC and implements the balancer.ClientConn // interface which is exposed to the cluster_resolver balancer. type cdsBalancer struct { - ccw *ccWrapper // ClientConn interface passed to child LB. - bOpts balancer.BuildOptions // BuildOptions passed to child LB. - updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. - xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. - clusterHandler *clusterHandler // To watch the clusters. - childLB balancer.Balancer - logger *grpclog.PrefixLogger - closed *grpcsync.Event - done *grpcsync.Event - crParser balancer.ConfigParser + // The following fields are initialized at build time and are either + // read-only after that or provide their own synchronization, and therefore + // do not need to be guarded by a mutex. + ccw *ccWrapper // ClientConn interface passed to child LB. + bOpts balancer.BuildOptions // BuildOptions passed to child LB. + childConfigParser balancer.ConfigParser // Config parser for cluster_resolver LB policy. + xdsHI *xdsinternal.HandshakeInfo // Handshake info from security configuration. + logger *grpclog.PrefixLogger // Prefix logger for all logging. 
+ + // The serializer and its cancel func are initialized at build time, and the + // rest of the fields here are only accessed from serializer callbacks (or + // from balancer.Balancer methods, which themselves are guaranteed to be + // mutually exclusive) and hence do not need to be guarded by a mutex. + serializer *grpcsync.CallbackSerializer // Serializes updates from gRPC and xDS client. + serializerCancel context.CancelFunc // Stops the above serializer. + childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph. + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resources. + watchers map[string]*watcherState // Set of watchers and associated state, keyed by cluster name. + lbCfg *lbConfig // Current load balancing configuration. // The certificate providers are cached here to that they can be closed when // a new provider is to be created. cachedRoot certprovider.Provider cachedIdentity certprovider.Provider - xdsHI *xdsinternal.HandshakeInfo xdsCredsInUse bool } -// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good -// updates lead to registration of a CDS watch. Updates with error lead to -// cancellation of existing watch and propagation of the same error to the -// cluster_resolver balancer. -func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { - // We first handle errors, if any, and then proceed with handling the - // update, only if the status quo has changed. - if err := update.err; err != nil { - b.handleErrorFromUpdate(err, true) - return - } - b.clusterHandler.updateRootCluster(update.clusterName) -} - // handleSecurityConfig processes the security configuration received from the // management server, creates appropriate certificate provider plugins, and // updates the HandhakeInfo which is added as an address attribute in // NewSubConn() calls. +// +// Only executed in the context of a serializer callback. 
func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { // If xdsCredentials are not in use, i.e, the user did not want to get // security configuration from an xDS server, we should not be acting on the @@ -220,7 +206,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e // Bootstrap did not find any certificate provider configs, but the user // has specified xdsCredentials and the management server has sent down // security configuration. - return errors.New("xds: certificate_providers config missing in bootstrap file") + return fmt.Errorf("xds: certificate_providers config missing in bootstrap file") } cpc := bc.CertProviderConfigs @@ -278,220 +264,29 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } -// handleWatchUpdate handles a watch update from the xDS Client. Good updates -// lead to clientConn updates being invoked on the underlying cluster_resolver balancer. -func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { - if err := update.err; err != nil { - b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) - b.handleErrorFromUpdate(err, false) - return - } - - b.logger.Infof("Received Cluster resource contains content: %s, security config: %s", pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) - - // Process the security config from the received update before building the - // child policy or forwarding the update to it. We do this because the child - // policy may try to create a new subConn inline. Processing the security - // configuration here and setting up the handshakeInfo will make sure that - // such attempts are handled properly. - if err := b.handleSecurityConfig(update.securityCfg); err != nil { - // If the security config is invalid, for example, if the provider - // instance is not found in the bootstrap config, we need to put the - // channel in transient failure. 
- b.logger.Warningf("Received Cluster resource contains invalid security config: %v", err) - b.handleErrorFromUpdate(err, false) - return - } - - // The first good update from the watch API leads to the instantiation of an - // cluster_resolver balancer. Further updates/errors are propagated to the existing - // cluster_resolver balancer. - if b.childLB == nil { - childLB, err := newChildBalancer(b.ccw, b.bOpts) - if err != nil { - b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) - return - } - b.childLB = childLB - b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) - } - - dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) - for i, cu := range update.updates { - switch cu.ClusterType { - case xdsresource.ClusterTypeEDS: - dms[i] = clusterresolver.DiscoveryMechanism{ - Type: clusterresolver.DiscoveryMechanismTypeEDS, - Cluster: cu.ClusterName, - EDSServiceName: cu.EDSServiceName, - MaxConcurrentRequests: cu.MaxRequests, - } - if cu.LRSServerConfig == xdsresource.ClusterLRSServerSelf { - bootstrapConfig := b.xdsClient.BootstrapConfig() - parsedName := xdsresource.ParseName(cu.ClusterName) - if parsedName.Scheme == xdsresource.FederationScheme { - // Is a federation resource name, find the corresponding - // authority server config. - if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok { - dms[i].LoadReportingServer = cfg.XDSServer - } - } else { - // Not a federation resource name, use the default - // authority. 
- dms[i].LoadReportingServer = bootstrapConfig.XDSServer - } - } - case xdsresource.ClusterTypeLogicalDNS: - dms[i] = clusterresolver.DiscoveryMechanism{ - Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, - Cluster: cu.ClusterName, - DNSHostname: cu.DNSHostName, - } - default: - b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) - } - if envconfig.XDSOutlierDetection { - odJSON := cu.OutlierDetection - // "In the cds LB policy, if the outlier_detection field is not set in - // the Cluster resource, a "no-op" outlier_detection config will be - // generated in the corresponding DiscoveryMechanism config, with all - // fields unset." - A50 - if odJSON == nil { - // This will pick up top level defaults in Cluster Resolver - // ParseConfig, but sre and fpe will be nil still so still a - // "no-op" config. - odJSON = json.RawMessage(`{}`) - } - dms[i].OutlierDetection = odJSON - } - } - - // Prepare Cluster Resolver config, marshal into JSON, and then Parse it to - // get configuration to send downward to Cluster Resolver. - lbCfg := &clusterresolver.LBConfig{ - DiscoveryMechanisms: dms, - XDSLBPolicy: update.lbPolicy, - } - crLBCfgJSON, err := json.Marshal(lbCfg) - if err != nil { - // Shouldn't happen, since we just prepared struct. - b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) - return - } - - var sc serviceconfig.LoadBalancingConfig - if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil { - b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(crLBCfgJSON), err) - return - } - - ccState := balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), - BalancerConfig: sc, +// A convenience method to create a watcher for cluster `name`. It also +// registers the watch with the xDS client, and adds the newly created watcher +// to the list of watchers maintained by the LB policy. 
+func (b *cdsBalancer) createAndAddWatcherForCluster(name string) { + w := &clusterWatcher{ + name: name, + parent: b, } - if err := b.childLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) - } -} - -// run is a long-running goroutine which handles all updates from gRPC. All -// methods which are invoked directly by gRPC or xdsClient simply push an -// update onto a channel which is read and acted upon right here. -func (b *cdsBalancer) run() { - for { - select { - case u, ok := <-b.updateCh.Get(): - if !ok { - return - } - b.updateCh.Load() - switch update := u.(type) { - case *ccUpdate: - b.handleClientConnUpdate(update) - case exitIdle: - if b.childLB == nil { - b.logger.Errorf("Received ExitIdle with no child policy") - break - } - // This implementation assumes the child balancer supports - // ExitIdle (but still checks for the interface's existence to - // avoid a panic if not). If the child does not, no subconns - // will be connected. - if ei, ok := b.childLB.(balancer.ExitIdler); ok { - ei.ExitIdle() - } - } - case u := <-b.clusterHandler.updateChannel: - b.handleWatchUpdate(u) - case <-b.closed.Done(): - b.clusterHandler.close() - if b.childLB != nil { - b.childLB.Close() - b.childLB = nil - } - if b.cachedRoot != nil { - b.cachedRoot.Close() - } - if b.cachedIdentity != nil { - b.cachedIdentity.Close() - } - b.updateCh.Close() - b.logger.Infof("Shutdown") - b.done.Fire() - return - } - } -} - -// handleErrorFromUpdate handles both the error from parent ClientConn (from -// resolver) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it's passed down to the child policy. -// Nothing needs to be done in CDS (e.g. it doesn't go into fallback). -// -// If the error is resource-not-found: -// - If it's from resolver, it means LDS resources were removed. 
The CDS watch -// should be canceled. -// - If it's from xds client, it means CDS resource were removed. The CDS -// watcher should keep watching. -// -// In both cases, the error will be forwarded to the child balancer. And if -// error is resource-not-found, the child balancer will stop watching EDS. -func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - // This is not necessary today, because xds client never sends connection - // errors. - if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - b.clusterHandler.close() - } - if b.childLB != nil { - if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { - // Connection errors will be sent to the child balancers directly. - // There's no need to forward them. - b.childLB.ResolverError(err) - } - } else { - // If child balancer was never created, fail the RPCs with - // errors. - b.ccw.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(err), - }) + ws := &watcherState{ + watcher: w, + cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w), } + b.watchers[name] = ws } // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. 
func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if b.closed.HasFired() { - b.logger.Errorf("Received balancer config after close") - return errBalancerClosed - } - if b.xdsClient == nil { c := xdsclient.FromResolverState(state.ResolverState) if c == nil { + b.logger.Warningf("Received balancer config with no xDS client") return balancer.ErrBadResolverState } b.xdsClient = c @@ -510,17 +305,49 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro b.logger.Warningf("Received balancer config with no cluster name") return balancer.ErrBadResolverState } - b.updateCh.Put(&ccUpdate{clusterName: lbCfg.ClusterName}) + + // Do nothing and return early if configuration has not changed. + if b.lbCfg != nil && b.lbCfg.ClusterName == lbCfg.ClusterName { + return nil + } + b.lbCfg = lbCfg + + // Handle the update in a blocking fashion. + done := make(chan struct{}) + ok = b.serializer.Schedule(func(context.Context) { + // A config update with a changed top-level cluster name means that none + // of our old watchers make any sense any more. + b.closeAllWatchers() + + // Create a new watcher for the top-level cluster. Upon resolution, it + // could end up creating more watchers if turns out to be an aggregate + // cluster. + b.createAndAddWatcherForCluster(lbCfg.ClusterName) + close(done) + }) + if !ok { + // The call to Schedule returns false *only* if the serializer has been + // closed, which happens only when we receive an update after close. + return errBalancerClosed + } + <-done return nil } // ResolverError handles errors reported by the xdsResolver. 
func (b *cdsBalancer) ResolverError(err error) { - if b.closed.HasFired() { - b.logger.Warningf("Received resolver error after close: %v", err) - return - } - b.updateCh.Put(&ccUpdate{err: err}) + b.serializer.Schedule(func(context.Context) { + // Resource not found error is reported by the resolver when the + // top-level cluster resource is removed by the management server. + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.closeAllWatchers() + } + var root string + if b.lbCfg != nil { + root = b.lbCfg.ClusterName + } + b.onClusterError(root, err) + }) } // UpdateSubConnState handles subConn updates from gRPC. @@ -528,15 +355,303 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } +// Closes all registered cluster wathers and removes them from the internal map. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) closeAllWatchers() { + for name, state := range b.watchers { + state.cancelWatch() + delete(b.watchers, name) + } +} + // Close cancels the CDS watch, closes the child policy and closes the // cdsBalancer. func (b *cdsBalancer) Close() { - b.closed.Fire() - <-b.done.Done() + b.serializer.Schedule(func(ctx context.Context) { + b.closeAllWatchers() + + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil + } + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() } func (b *cdsBalancer) ExitIdle() { - b.updateCh.Put(exitIdle{}) + b.serializer.Schedule(func(context.Context) { + if b.childLB == nil { + b.logger.Warningf("Received ExitIdle with no child policy") + return + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). 
If the child does not, no subconns + // will be connected. + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + }) +} + +// Handles a good Cluster update from the xDS client. Kicks off the discovery +// mechanism generation process from the top-level cluster and if the cluster +// graph is resolved, generates child policy config and pushes it down. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterUpdate(name string, update xdsresource.ClusterUpdate) { + state := b.watchers[name] + if state == nil { + // We are currently not watching this cluster anymore. Return early. + return + } + + b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update)) + + // Update the watchers map with the update for the cluster. + state.lastUpdate = &update + + // For an aggregate cluster, always use the security configuration on the + // root cluster. + if name == b.lbCfg.ClusterName { + // Process the security config from the received update before building the + // child policy or forwarding the update to it. We do this because the child + // policy may try to create a new subConn inline. Processing the security + // configuration here and setting up the handshakeInfo will make sure that + // such attempts are handled properly. + if err := b.handleSecurityConfig(update.SecurityCfg); err != nil { + // If the security config is invalid, for example, if the provider + // instance is not found in the bootstrap config, we need to put the + // channel in transient failure. 
+ b.onClusterError(name, fmt.Errorf("received Cluster resource contains invalid security config: %v", err)) + return + } + } + + clustersSeen := make(map[string]bool) + dms, ok, err := b.generateDMsForCluster(b.lbCfg.ClusterName, 0, nil, clustersSeen) + if err != nil { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("failed to generate discovery mechanisms: %v", err)) + return + } + if ok { + if len(dms) == 0 { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("aggregate cluster graph has no leaf clusters")) + return + } + // Child policy is built the first time we resolve the cluster graph. + if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) + if err != nil { + b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) + return + } + b.childLB = childLB + b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) + } + + // Prepare the child policy configuration, convert it to JSON, have it + // parsed by the child policy to convert it into service config and push + // an update to it. + childCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: dms, + // The LB policy is configured by the root cluster. + XDSLBPolicy: b.watchers[b.lbCfg.ClusterName].lastUpdate.LBPolicy, + } + cfgJSON, err := json.Marshal(childCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. 
+ b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", childCfg) + return + } + + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childConfigParser.ParseConfig(cfgJSON); err != nil { + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(cfgJSON), err) + return + } + + ccState := balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), + BalancerConfig: sc, + } + if err := b.childLB.UpdateClientConnState(ccState); err != nil { + b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) + } + } + // We no longer need the clusters that we did not see in this iteration of + // generateDMsForCluster(). + for cluster := range clustersSeen { + state, ok := b.watchers[cluster] + if ok { + continue + } + state.cancelWatch() + delete(b.watchers, cluster) + } +} + +// Handles an error Cluster update from the xDS client. Propagates the error +// down to the child policy if one exists, or puts the channel in +// TRANSIENT_FAILURE. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterError(name string, err error) { + b.logger.Warningf("Cluster resource %q received error update: %v", name, err) + + if b.childLB != nil { + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { + // Connection errors will be sent to the child balancers directly. + // There's no need to forward them. + b.childLB.ResolverError(err) + } + } else { + // If child balancer was never created, fail the RPCs with + // errors. + b.ccw.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(fmt.Errorf("%q: %v", name, err)), + }) + } +} + +// Handles a resource-not-found error from the xDS client. Propagates the error +// down to the child policy if one exists, or puts the channel in +// TRANSIENT_FAILURE. 
+// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterResourceNotFound(name string) { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", name) + if b.childLB != nil { + b.childLB.ResolverError(err) + } else { + // If child balancer was never created, fail the RPCs with errors. + b.ccw.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) + } +} + +// Generates discovery mechanisms for the cluster graph rooted at `name`. This +// method is called recursively if `name` corresponds to an aggregate cluster, +// with the base case for recursion being a leaf cluster. If a new cluster is +// encountered when traversing the graph, a watcher is created for it. +// +// Inputs: +// - name: name of the cluster to start from +// - depth: recursion depth of the current cluster, starting from root +// - dms: prioritized list of current discovery mechanisms +// - clustersSeen: cluster names seen so far in the graph traversal +// +// Outputs: +// - new prioritized list of discovery mechanisms +// - boolean indicating if traversal of the aggregate cluster graph is +// complete. If false, the above list of discovery mechanisms is ignored. +// - error indicating if any error was encountered as part of the graph +// traversal. If error is non-nil, the other return values are ignored. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []clusterresolver.DiscoveryMechanism, clustersSeen map[string]bool) ([]clusterresolver.DiscoveryMechanism, bool, error) { + if depth >= aggregateClusterMaxDepth { + return dms, false, errExceedsMaxDepth + } + + if clustersSeen[name] { + // Discovery mechanism already seen through a different branch. 
+ return dms, true, nil + } + clustersSeen[name] = true + + state, ok := b.watchers[name] + if !ok { + // If we have not seen this cluster so far, create a watcher for it, add + // it to the map, start the watch and return. + b.createAndAddWatcherForCluster(name) + + // And since we just created the watcher, we know that we haven't + // resolved the cluster graph yet. + return dms, false, nil + } + + // A watcher exists, but no update has been received yet. + if state.lastUpdate == nil { + return dms, false, nil + } + + var dm clusterresolver.DiscoveryMechanism + cluster := state.lastUpdate + switch cluster.ClusterType { + case xdsresource.ClusterTypeAggregate: + // This boolean is used to track if any of the clusters in the graph is + // not yet completely resolved or returns errors, thereby allowing us to + // traverse as much of the graph as possible (and start the associated + // watches where required) to ensure that clustersSeen contains all + // clusters in the graph that we can traverse to. + missingCluster := false + var err error + for _, child := range cluster.PrioritizedClusterNames { + var ok bool + dms, ok, err = b.generateDMsForCluster(child, depth+1, dms, clustersSeen) + if err != nil || !ok { + missingCluster = true + } + } + return dms, !missingCluster, err + case xdsresource.ClusterTypeEDS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: cluster.ClusterName, + EDSServiceName: cluster.EDSServiceName, + MaxConcurrentRequests: cluster.MaxRequests, + } + if cluster.LRSServerConfig == xdsresource.ClusterLRSServerSelf { + bootstrapConfig := b.xdsClient.BootstrapConfig() + parsedName := xdsresource.ParseName(cluster.ClusterName) + if parsedName.Scheme == xdsresource.FederationScheme { + // Is a federation resource name, find the corresponding + // authority server config. 
+ if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok { + dm.LoadReportingServer = cfg.XDSServer + } + } else { + // Not a federation resource name, use the default + // authority. + dm.LoadReportingServer = bootstrapConfig.XDSServer + } + } + case xdsresource.ClusterTypeLogicalDNS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + Cluster: cluster.ClusterName, + DNSHostname: cluster.DNSHostName, + } + } + if envconfig.XDSOutlierDetection { + odJSON := cluster.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." - A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. + odJSON = json.RawMessage(`{}`) + } + dm.OutlierDetection = odJSON + } + + return append(dms, dm), true, nil } // ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go deleted file mode 100644 index aa2d9674a7904..0000000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cdsbalancer - -import ( - "encoding/json" - "errors" - "sync" - - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -const maxDepth = 16 - -var ( - errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") - errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth") -) - -// clusterHandlerUpdate wraps the information received from the registered CDS -// watcher. A non-nil error is propagated to the underlying cluster_resolver -// balancer. A valid update results in creating a new cluster_resolver balancer -// (if one doesn't already exist) and pushing the update to it. -type clusterHandlerUpdate struct { - // securityCfg is the Security Config from the top (root) cluster. - securityCfg *xdsresource.SecurityConfig - - // lbPolicy is the the child of the cluster_impl policy, for all priorities. - lbPolicy json.RawMessage - - // updates is a list of ClusterUpdates from all the leaf clusters. - updates []xdsresource.ClusterUpdate - err error -} - -// clusterHandler will be given a name representing a cluster. It will then -// update the CDS policy constantly with a list of Clusters to pass down to -// XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion. -type clusterHandler struct { - parent *cdsBalancer - - // A mutex to protect entire tree of clusters. 
- clusterMutex sync.Mutex - rootClusterName string - - createdClusters map[string]*clusterNode - - // A way to ping CDS Balancer about any updates or errors to a Node in the - // tree. This will either get called from this handler constructing an - // update or from a child with an error. Capacity of one as the only update - // CDS Balancer cares about is the most recent update. - updateChannel chan clusterHandlerUpdate -} - -func newClusterHandler(parent *cdsBalancer) *clusterHandler { - return &clusterHandler{ - parent: parent, - updateChannel: make(chan clusterHandlerUpdate, 1), - createdClusters: make(map[string]*clusterNode), - } -} - -func (ch *clusterHandler) updateRootCluster(rootClusterName string) { - ch.clusterMutex.Lock() - defer ch.clusterMutex.Unlock() - if ch.createdClusters[ch.rootClusterName] == nil { - // Construct a root node on first update. - createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) - ch.rootClusterName = rootClusterName - return - } - // Check if root cluster was changed. If it was, delete old one and start - // new one, if not do nothing. - if rootClusterName != ch.rootClusterName { - ch.createdClusters[ch.rootClusterName].delete() - createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) - ch.rootClusterName = rootClusterName - } -} - -// This function tries to construct a cluster update to send to CDS. -func (ch *clusterHandler) constructClusterUpdate() { - if ch.createdClusters[ch.rootClusterName] == nil { - // If root is nil, this handler is closed, ignore the update. - return - } - clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool)) - if err != nil { - // If there was an error received no op, as this can mean one of the - // children hasn't received an update yet, or the graph continued to - // stay in an error state. 
If the graph continues to stay in an error - // state, no new error needs to be written to the update buffer as that - // would be redundant information. - return - } - if clusterUpdate == nil { - // This means that there was an aggregated cluster with no EDS or DNS as - // leaf nodes. No update to be written. - return - } - // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. - select { - case <-ch.updateChannel: - default: - } - - ch.updateChannel <- clusterHandlerUpdate{ - securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, - lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, - updates: clusterUpdate, - } -} - -// close() is meant to be called by CDS when the CDS balancer is closed, and it -// cancels the watches for every cluster in the cluster tree. -func (ch *clusterHandler) close() { - ch.clusterMutex.Lock() - defer ch.clusterMutex.Unlock() - if ch.createdClusters[ch.rootClusterName] == nil { - return - } - ch.createdClusters[ch.rootClusterName].delete() - ch.rootClusterName = "" -} - -// This logically represents a cluster. This handles all the logic for starting -// and stopping a cluster watch, handling any updates, and constructing a list -// recursively for the ClusterHandler. -type clusterNode struct { - // A way to cancel the watch for the cluster. - cancelFunc func() - - // A list of children, as the Node can be an aggregate Cluster. - children []string - - // A ClusterUpdate in order to build a list of cluster updates for CDS to - // send down to child XdsClusterResolverLoadBalancingPolicy. - clusterUpdate xdsresource.ClusterUpdate - - // This boolean determines whether this Node has received an update or not. - // This isn't the best practice, but this will protect a list of Cluster - // Updates from being constructed if a cluster in the tree has not received - // an update yet. 
- receivedUpdate bool - - clusterHandler *clusterHandler - - depth int32 - refCount int32 - - // maxDepthErr is set if this cluster node is an aggregate cluster and has a - // child that causes the graph to exceed the maximum depth allowed. This is - // used to show a cluster graph as being in an error state when it constructs - // a cluster update. - maxDepthErr error -} - -// CreateClusterNode creates a cluster node from a given clusterName. This will -// also start the watch for that cluster. -func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) { - // If the cluster has already been created, simply return, which ignores - // duplicates. - if topLevelHandler.createdClusters[clusterName] != nil { - topLevelHandler.createdClusters[clusterName].refCount++ - return - } - c := &clusterNode{ - clusterHandler: topLevelHandler, - depth: depth, - refCount: 1, - } - // Communicate with the xds client here. - topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) - cancel := xdsClient.WatchCluster(clusterName, c.handleResp) - c.cancelFunc = func() { - topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) - cancel() - } - topLevelHandler.createdClusters[clusterName] = c -} - -// This function cancels the cluster watch on the cluster and all of it's -// children. -func (c *clusterNode) delete() { - c.refCount-- - if c.refCount == 0 { - c.cancelFunc() - delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName) - for _, child := range c.children { - if c.clusterHandler.createdClusters[child] != nil { - c.clusterHandler.createdClusters[child].delete() - } - } - } -} - -// Construct cluster update (potentially a list of ClusterUpdates) for a node. -func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) { - // If the cluster has not yet received an update, the cluster update is not - // yet ready. 
- if !c.receivedUpdate { - return nil, errNotReceivedUpdate - } - if c.maxDepthErr != nil { - return nil, c.maxDepthErr - } - // Ignore duplicates. It's ok to ignore duplicates because the second - // occurrence of a cluster will never be used. I.e. in [C, D, C], the second - // C will never be used (the only way to fall back to lower priority D is if - // C is down, which means second C will never be chosen). Thus, [C, D, C] is - // logically equivalent to [C, D]. - if clustersSeen[c.clusterUpdate.ClusterName] { - return []xdsresource.ClusterUpdate{}, nil - } - clustersSeen[c.clusterUpdate.ClusterName] = true - - // Base case - LogicalDNS or EDS. Both of these cluster types will be tied - // to a single ClusterUpdate. - if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { - return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil - } - - // If an aggregate construct a list by recursively calling down to all of - // it's children. - var childrenUpdates []xdsresource.ClusterUpdate - for _, child := range c.children { - childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen) - if err != nil { - return nil, err - } - childrenUpdates = append(childrenUpdates, childUpdateList...) - } - return childrenUpdates, nil -} - -// handleResp handles a xds response for a particular cluster. This function -// also handles any logic with regards to any child state that may have changed. -// At the end of the handleResp(), the clusterUpdate will be pinged in certain -// situations to try and construct an update to send back to CDS. -func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) { - c.clusterHandler.clusterMutex.Lock() - defer c.clusterHandler.clusterMutex.Unlock() - if err != nil { // Write this error for run() to pick up in CDS LB policy. 
- // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. - select { - case <-c.clusterHandler.updateChannel: - default: - } - c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} - c.receivedUpdate = false - c.maxDepthErr = nil - return - } - - c.receivedUpdate = true - c.clusterUpdate = clusterUpdate - - // If the cluster was a leaf node, if the cluster update received had change - // in the cluster update then the overall cluster update would change and - // there is a possibility for the overall update to build so ping cluster - // handler to return. Also, if there was any children from previously, - // delete the children, as the cluster type is no longer an aggregate - // cluster. - if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { - for _, child := range c.children { - c.clusterHandler.createdClusters[child].delete() - } - c.children = nil - c.maxDepthErr = nil - // This is an update in the one leaf node, should try to send an update - // to the parent CDS balancer. - // - // Note that this update might be a duplicate from the previous one. - // Because the update contains not only the cluster name to watch, but - // also the extra fields (e.g. security config). There's no good way to - // compare all the fields. - c.clusterHandler.constructClusterUpdate() - return - } - - // Aggregate cluster handling. - if len(clusterUpdate.PrioritizedClusterNames) >= 1 { - if c.depth == maxDepth-1 { - // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. 
- select { - case <-c.clusterHandler.updateChannel: - default: - } - c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth} - c.children = []string{} - c.maxDepthErr = errExceedsMaxDepth - return - } - } - - newChildren := make(map[string]bool) - for _, childName := range clusterUpdate.PrioritizedClusterNames { - newChildren[childName] = true - } - - // These booleans help determine whether this callback will ping the overall - // clusterHandler to try and construct an update to send back to CDS. This - // will be determined by whether there would be a change in the overall - // clusterUpdate for the whole tree (ex. change in clusterUpdate for current - // cluster or a deleted child) and also if there's even a possibility for - // the update to build (ex. if a child is created and a watch is started, - // that child hasn't received an update yet due to the mutex lock on this - // callback). - var createdChild bool - - // This map will represent the current children of the cluster. It will be - // first added to in order to represent the new children. It will then have - // any children deleted that are no longer present. - mapCurrentChildren := make(map[string]bool) - for _, child := range c.children { - mapCurrentChildren[child] = true - } - - // Add and construct any new child nodes. - for child := range newChildren { - if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { - createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1) - } - } - - // Delete any child nodes no longer in the aggregate cluster's children. 
- for child := range mapCurrentChildren { - if _, stillAChild := newChildren[child]; !stillAChild { - c.clusterHandler.createdClusters[child].delete() - delete(mapCurrentChildren, child) - } - } - - c.children = clusterUpdate.PrioritizedClusterNames - - c.maxDepthErr = nil - // If the cluster is an aggregate cluster, if this callback created any new - // child cluster nodes, then there's no possibility for a full cluster - // update to successfully build, as those created children will not have - // received an update yet. Even if this update did not delete a child, there - // is still a possibility for the cluster update to build, as the aggregate - // cluster can ignore duplicated children and thus the update can fill out - // the full cluster update tree. - if !createdChild { - c.clusterHandler.constructClusterUpdate() - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go new file mode 100644 index 0000000000000..0b0d168376d74 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -0,0 +1,58 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cdsbalancer + +import ( + "context" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// clusterWatcher implements the xdsresource.ClusterWatcher interface, and is +// passed to the xDS client as part of the WatchResource() API. +// +// It watches a single cluster and handles callbacks from the xDS client by +// scheduling them on the parent LB policy's serializer. +type clusterWatcher struct { + name string + parent *cdsBalancer +} + +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterUpdate(cw.name, u.Resource) + }) +} + +func (cw *clusterWatcher) OnError(err error) { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterError(cw.name, err) + }) +} + +func (cw *clusterWatcher) OnResourceDoesNotExist() { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterResourceNotFound(cw.name) + }) +} + +// watcherState groups the state associated with a clusterWatcher. +type watcherState struct { + watcher *clusterWatcher // The underlying watcher. + cancelWatch func() // Cancel func to cancel the watch. + lastUpdate *xdsresource.ClusterUpdate // Most recent update received for this cluster. 
+} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index b9a81e9ba8293..151c54dae6d09 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -200,7 +200,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { for dm, r := range rr.childrenMap { if !newDMs[dm] { delete(rr.childrenMap, dm) - r.r.stop() + go r.r.stop() } } // Regenerate even if there's no change in discovery mechanism, in case diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index 02470ddca5e45..06f6a47519c41 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/wrr" @@ -229,19 +230,30 @@ func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPo func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { var hash uint64 var generatedHash bool + var md, emd metadata.MD + var mdRead bool for _, policy := range hashPolicies { var policyHash uint64 var generatedPolicyHash bool switch policy.HashPolicyType { case xdsresource.HashPolicyTypeHeader: - md, ok := metadata.FromOutgoingContext(rpcInfo.Context) - if !ok { + if strings.HasSuffix(policy.HeaderName, "-bin") { continue } - values := 
md.Get(policy.HeaderName) - // If the header isn't present, no-op. + if !mdRead { + md, _ = metadata.FromOutgoingContext(rpcInfo.Context) + emd, _ = grpcutil.ExtraMetadata(rpcInfo.Context) + mdRead = true + } + values := emd.Get(policy.HeaderName) if len(values) == 0 { - continue + // Extra metadata (e.g. the "content-type" header) takes + // precedence over the user's metadata. + values = md.Get(policy.HeaderName) + if len(values) == 0 { + // If the header isn't present at all, this policy is a no-op. + continue + } } joinedValues := strings.Join(values, ",") if policy.Regex != nil { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 44f6d3bc0a1cf..542c5e025fd1b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -32,7 +32,6 @@ import ( type XDSClient interface { WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() - WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() // WatchResource uses xDS to discover the resource associated with the // provided resource name. 
The resource type implementation determines how diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index e503349dbc29a..5866221e2696d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -81,37 +81,6 @@ func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.R return xdsresource.WatchRouteConfig(c, resourceName, watcher) } -// This is only required temporarily, while we modify the -// clientImpl.WatchCluster API to be implemented via the wrapper WatchCluster() -// API which calls the WatchResource() API. -type clusterWatcher struct { - resourceName string - cb func(xdsresource.ClusterUpdate, error) -} - -func (c *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) { - c.cb(update.Resource, nil) -} - -func (c *clusterWatcher) OnError(err error) { - c.cb(xdsresource.ClusterUpdate{}, err) -} - -func (c *clusterWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", c.resourceName) - c.cb(xdsresource.ClusterUpdate{}, err) -} - -// WatchCluster uses CDS to discover information about the Cluster resource -// identified by resourceName. -// -// WatchCluster can be called multiple times, with same or different -// clusterNames. Each call will start an independent watcher for the resource. -func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { - watcher := &clusterWatcher{resourceName: resourceName, cb: cb} - return xdsresource.WatchCluster(c, resourceName, watcher) -} - // WatchResource uses xDS to discover the resource associated with the provided // resource name. 
The resource type implementation determines how xDS requests // are sent out and how responses are deserialized and validated. Upon receipt diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go index 89ffc4fcec661..4b8ca29ce93f3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -25,6 +25,7 @@ import ( "time" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" @@ -100,54 +101,36 @@ func (t *Transport) lrsRunner(ctx context.Context) { node := proto.Clone(t.nodeProto).(*v3corepb.Node) node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - backoffTimer.Stop() - return + runLoadReportStream := func() error { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) + if err != nil { + t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) + return nil } + t.logger.Infof("Created LRS stream to server %q", t.serverURI) - // We reset backoff state when we successfully receive at least one - // message from the server. - resetBackoff := func() bool { - // streamCtx is created and canceled in case we terminate the stream - // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring - // goroutine. 
- streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) - if err != nil { - t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) - return false - } - t.logger.Infof("Created LRS stream to server %q", t.serverURI) - - if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { - t.logger.Warningf("Sending first LRS request failed: %v", err) - return false - } - - clusters, interval, err := t.recvFirstLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("Reading from LRS stream failed: %v", err) - return false - } - - t.sendLoads(streamCtx, stream, clusters, interval) - return true - }() + if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { + t.logger.Warningf("Sending first LRS request failed: %v", err) + return nil + } - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(t.backoff(backoffAttempt)) - backoffAttempt++ + clusters, interval, err := t.recvFirstLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("Reading from LRS stream failed: %v", err) + return nil } + + // We reset backoff state when we successfully receive at least one + // message from the server. 
+ t.sendLoads(streamCtx, stream, clusters, interval) + return backoff.ErrResetBackoff } + backoff.RunF(ctx, runLoadReportStream, t.backoff) } func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go index 86803588a7cc2..001552d7b4798 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -325,43 +325,29 @@ func (t *Transport) adsRunner(ctx context.Context) { go t.send(ctx) - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - backoffTimer.Stop() - return + // We reset backoff state when we successfully receive at least one + // message from the server. + runStreamWithBackoff := func() error { + stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) + if err != nil { + t.onErrorHandler(err) + t.logger.Warningf("Creating new ADS stream failed: %v", err) + return nil } + t.logger.Infof("ADS stream created") - // We reset backoff state when we successfully receive at least one - // message from the server. 
- resetBackoff := func() bool { - stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) - if err != nil { - t.onErrorHandler(err) - t.logger.Warningf("Creating new ADS stream failed: %v", err) - return false - } - t.logger.Infof("ADS stream created") - - select { - case <-t.adsStreamCh: - default: - } - t.adsStreamCh <- stream - return t.recv(stream) - }() - - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(t.backoff(backoffAttempt)) - backoffAttempt++ + select { + case <-t.adsStreamCh: + default: + } + t.adsStreamCh <- stream + msgReceived := t.recv(stream) + if msgReceived { + return backoff.ErrResetBackoff } + return nil } + backoff.RunF(ctx, runStreamWithBackoff, t.backoff) } // send is a separate goroutine for sending resource requests on the ADS stream. diff --git a/vendor/modules.txt b/vendor/modules.txt index ae84c85f97224..76a30c5a6689e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -464,7 +464,7 @@ github.com/coreos/go-systemd/sdjournal ## explicit; go 1.12 github.com/coreos/go-systemd/v22/activation github.com/coreos/go-systemd/v22/journal -# github.com/cristalhq/hedgedhttp v0.7.2 +# github.com/cristalhq/hedgedhttp v0.9.1 ## explicit; go 1.16 github.com/cristalhq/hedgedhttp # github.com/d4l3k/messagediff v1.2.1 @@ -816,7 +816,7 @@ github.com/google/s2a-go/internal/v2/certverifier github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore github.com/google/s2a-go/stream -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.3.1 ## explicit github.com/google/uuid # github.com/googleapis/enterprise-certificate-proxy v0.2.5 @@ -853,8 +853,8 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 -## explicit; go 1.19 +# github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f 
+## explicit; go 1.20 github.com/grafana/dskit/aws github.com/grafana/dskit/backoff github.com/grafana/dskit/concurrency @@ -1615,8 +1615,8 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.10.0 -## explicit; go 1.17 +# golang.org/x/oauth2 v0.11.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials @@ -1736,7 +1736,7 @@ google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 +# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -1746,7 +1746,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.58.3 +# google.golang.org/grpc v1.59.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes From e6940691c98b47f9fed1d2e2a5c73de311464e43 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Wed, 22 Nov 2023 13:32:08 +0100 Subject: [PATCH 30/48] Set query plan when copying LokiRequest (#11291) **What this PR does / why we need it**: The recent change https://github.com/grafana/loki/pull/11246 requires that `LokiRequest.Plan` is always set. 
**Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [x] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --- pkg/querier/queryrange/split_by_interval.go | 1 + pkg/querier/queryrange/split_by_interval_test.go | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index b4245375bce66..84b3a519f1269 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -259,6 +259,7 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran Path: r.Path, StartTs: start, EndTs: end, + Plan: r.Plan, }) }) case *LokiSeriesRequest: diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go index ce02105d091fe..78d74b111a12a 100644 --- a/pkg/querier/queryrange/split_by_interval_test.go +++ b/pkg/querier/queryrange/split_by_interval_test.go @@ -17,7 +17,9 @@ import ( "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" + 
"github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/config" ) @@ -57,25 +59,31 @@ var testSchemasTSDB = func() []config.PeriodConfig { func Test_splitQuery(t *testing.T) { buildLokiRequest := func(start, end time.Time) queryrangebase.Request { return &LokiRequest{ - Query: "foo", + Query: `{app="foo"}`, Limit: 1, Step: 2, StartTs: start, EndTs: end, Direction: logproto.BACKWARD, Path: "/path", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"}`), + }, } } buildLokiRequestWithInterval := func(start, end time.Time) queryrangebase.Request { return &LokiRequest{ - Query: "foo", + Query: `{app="foo"}`, Limit: 1, Interval: 2, StartTs: start, EndTs: end, Direction: logproto.BACKWARD, Path: "/path", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"}`), + }, } } From af177034edc0a69b8982a79bf55593fa579fbf66 Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Wed, 22 Nov 2023 08:07:43 -0500 Subject: [PATCH 31/48] Compression of bloom blocks (#11267) **What this PR does / why we need it**: Compress bloom blocks when writing to object storage, and uncompress when reading from object storage. 
**Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --- pkg/bloomcompactor/bloomcompactor.go | 14 +++- pkg/storage/bloom/v1/archive.go | 42 ++++++++++- pkg/storage/bloom/v1/block_writer.go | 8 +- pkg/storage/bloom/v1/reader.go | 4 +- .../stores/shipper/bloomshipper/client.go | 75 ++++++++++++++++--- .../shipper/bloomshipper/client_test.go | 71 +++++++++++++++--- .../stores/shipper/bloomshipper/shipper.go | 5 +- .../shipper/bloomshipper/shipper_test.go | 5 +- 8 files changed, 190 insertions(+), 34 deletions(-) diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index 71dbb08380d91..f004936e10aff 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -61,9 +61,11 @@ import ( "github.com/grafana/loki/pkg/util" ) +// TODO: Make a constants file somewhere const ( - fpRate = 0.01 - bloomFileName = "bloom" + fpRate = 0.01 + bloomFileName = "bloom" + seriesFileName = "series" ) 
type Compactor struct { @@ -485,6 +487,11 @@ func buildBloomBlock(ctx context.Context, logger log.Logger, bloomForChks v1.Ser level.Error(logger).Log("reading bloomBlock", err) } + indexFile, err := os.Open(filepath.Join(localDst, seriesFileName)) + if err != nil { + level.Error(logger).Log("reading bloomBlock", err) + } + blocks := bloomshipper.Block{ BlockRef: bloomshipper.BlockRef{ Ref: bloomshipper.Ref{ @@ -498,7 +505,8 @@ func buildBloomBlock(ctx context.Context, logger log.Logger, bloomForChks v1.Ser }, IndexPath: job.IndexPath(), }, - Data: blockFile, + BloomData: blockFile, + IndexData: indexFile, } return blocks, nil diff --git a/pkg/storage/bloom/v1/archive.go b/pkg/storage/bloom/v1/archive.go index 4c0b124a05cf4..7f252e3bde03e 100644 --- a/pkg/storage/bloom/v1/archive.go +++ b/pkg/storage/bloom/v1/archive.go @@ -5,12 +5,46 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/pkg/errors" "github.com/grafana/loki/pkg/chunkenc" ) +func TarGzMemory(dst io.Writer, src *ByteReader) error { + gzipper := chunkenc.GetWriterPool(chunkenc.EncGZIP).GetWriter(dst) + defer gzipper.Close() + + tarballer := tar.NewWriter(gzipper) + defer tarballer.Close() + + header := &tar.Header{ + Name: SeriesFileName, + Size: int64(src.index.Len()), + } + // Write the header + if err := tarballer.WriteHeader(header); err != nil { + return errors.Wrapf(err, "error writing tar header for index file") + } + // Write the file contents + if _, err := tarballer.Write(src.index.Bytes()); err != nil { + return errors.Wrapf(err, "error writing file contents for index file") + } + + header = &tar.Header{ + Name: BloomFileName, + Size: int64(src.blooms.Len()), + } + if err := tarballer.WriteHeader(header); err != nil { + return errors.Wrapf(err, "error writing tar header for bloom file") + } + if _, err := tarballer.Write(src.blooms.Bytes()); err != nil { + return errors.Wrapf(err, "error writing file contents for bloom file") + } + return nil +} + func TarGz(dst io.Writer, src 
*DirectoryBlockReader) error { if err := src.Init(); err != nil { return errors.Wrap(err, "error initializing directory block reader") @@ -77,7 +111,13 @@ func UnTarGz(dst string, r io.Reader) error { // if it's a file create it case tar.TypeReg: - f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode)) + err := os.MkdirAll(target[:strings.LastIndex(target, "/")], 0755) + if err != nil { + return errors.Wrapf(err, "error creating directory %s", target) + } + // TODO: We need to settle on how best to handle file permissions and ownership + // This may be utilizing a zip file instead of tar.gz + f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0755) if err != nil { return errors.Wrapf(err, "error creating file %s", target) } diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go index 317d1e598414a..99ab65ef9cd40 100644 --- a/pkg/storage/bloom/v1/block_writer.go +++ b/pkg/storage/bloom/v1/block_writer.go @@ -12,8 +12,8 @@ import ( ) const ( - bloomFileName = "bloom" - seriesFileName = "series" + BloomFileName = "bloom" + SeriesFileName = "series" ) type BlockWriter interface { @@ -66,12 +66,12 @@ func (b *DirectoryBlockWriter) Init() error { return errors.Wrap(err, "creating bloom block dir") } - b.index, err = os.Create(filepath.Join(b.dir, seriesFileName)) + b.index, err = os.Create(filepath.Join(b.dir, SeriesFileName)) if err != nil { return errors.Wrap(err, "creating series file") } - b.blooms, err = os.Create(filepath.Join(b.dir, bloomFileName)) + b.blooms, err = os.Create(filepath.Join(b.dir, BloomFileName)) if err != nil { return errors.Wrap(err, "creating bloom file") } diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go index e4de9609b9082..d5c70a2b64d83 100644 --- a/pkg/storage/bloom/v1/reader.go +++ b/pkg/storage/bloom/v1/reader.go @@ -49,12 +49,12 @@ func NewDirectoryBlockReader(dir string) *DirectoryBlockReader { func (r *DirectoryBlockReader) 
Init() error { if !r.initialized { var err error - r.index, err = os.Open(filepath.Join(r.dir, seriesFileName)) + r.index, err = os.Open(filepath.Join(r.dir, SeriesFileName)) if err != nil { return errors.Wrap(err, "opening series file") } - r.blooms, err = os.Open(filepath.Join(r.dir, bloomFileName)) + r.blooms, err = os.Open(filepath.Join(r.dir, BloomFileName)) if err != nil { return errors.Wrap(err, "opening bloom file") } diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go index a68959e1d908e..76cc4c2bfde9f 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client.go +++ b/pkg/storage/stores/shipper/bloomshipper/client.go @@ -1,16 +1,20 @@ package bloomshipper import ( + "bufio" "bytes" "context" "encoding/json" "fmt" "io" + "os" "path/filepath" "strconv" "strings" "time" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/prometheus/common/model" "github.com/grafana/dskit/concurrency" @@ -75,7 +79,8 @@ type MetaClient interface { type Block struct { BlockRef - Data io.ReadCloser + IndexData io.ReadCloser + BloomData io.ReadCloser } type BlockClient interface { @@ -205,13 +210,35 @@ func (b *BloomClient) GetBlocks(ctx context.Context, references []BlockRef) (cha return fmt.Errorf("error while period lookup: %w", err) } objectClient := b.periodicObjectClients[period] - readCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref)) + compressedObjectReadCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref)) if err != nil { return fmt.Errorf("error while fetching object from storage: %w", err) } + defer func() { + compressedObjectReadCloser.Close() + }() + + workingDirectoryPath := filepath.Join(b.storageConfig.BloomShipperConfig.WorkingDirectory, reference.BlockPath, strconv.FormatInt(time.Now().UTC().UnixMilli(), 10)) + err = v1.UnTarGz(workingDirectoryPath, compressedObjectReadCloser) + if err != nil { + return fmt.Errorf("error 
while untarring: %w", err) + } + + indexFile, err := os.Open(filepath.Join(workingDirectoryPath, v1.SeriesFileName)) + if err != nil { + return fmt.Errorf("error while opening index file: %w", err) + } + indexReader := bufio.NewReader(indexFile) + + bloomFile, err := os.Open(filepath.Join(workingDirectoryPath, v1.BloomFileName)) + if err != nil { + return fmt.Errorf("error while opening bloom file: %w", err) + } + bloomReader := bufio.NewReader(bloomFile) blocksChannel <- Block{ - BlockRef: reference, - Data: readCloser, + BlockRef: reference, + BloomData: io.NopCloser(bloomReader), + IndexData: io.NopCloser(indexReader), } return nil }) @@ -225,7 +252,25 @@ func (b *BloomClient) GetBlocks(ctx context.Context, references []BlockRef) (cha return blocksChannel, errChannel } -// TODO zip (archive) blocks before uploading to storage +func readCloserToBuffer(rc io.ReadCloser) *bytes.Buffer { + defer rc.Close() + + // Read the data from io.ReadCloser + data, err := io.ReadAll(rc) + if err != nil { + return nil + } + + // Write the data into a bytes.Buffer + var buf bytes.Buffer + _, err = buf.Write(data) + if err != nil { + return nil + } + + return &buf +} + func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, error) { results := make([]Block, len(blocks)) //todo move concurrency to the config @@ -233,7 +278,11 @@ func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, e block := blocks[idx] defer func(Data io.ReadCloser) { _ = Data.Close() - }(block.Data) + }(block.BloomData) + + defer func(Data io.ReadCloser) { + _ = Data.Close() + }(block.IndexData) period, err := findPeriod(b.periodicConfigs, block.StartTimestamp) if err != nil { @@ -241,11 +290,19 @@ func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, e } key := createBlockObjectKey(block.Ref) objectClient := b.periodicObjectClients[period] - data, err := io.ReadAll(block.Data) + byteReader := 
v1.NewByteReader(readCloserToBuffer(block.IndexData), readCloserToBuffer(block.BloomData)) + + // TODO: Right now, this is asymetrical with the GetBlocks path. We have all the pieces + // in memory now, so it doesn't necessarily make sense to write the files to disk. That may change + // as we finalize on an archive format, and we may want to just house the downloaded files in memory instead. + // Create a buffer to write data + buf := new(bytes.Buffer) + err = v1.TarGzMemory(buf, byteReader) if err != nil { - return fmt.Errorf("error while reading object data: %w", err) + return fmt.Errorf("error while tarring object data: %w", err) } - err = objectClient.PutObject(ctx, key, bytes.NewReader(data)) + + err = objectClient.PutObject(ctx, key, bytes.NewReader(buf.Bytes())) if err != nil { return fmt.Errorf("error updloading block file: %w", err) } diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go index 4c4b6f855a8ec..6031e8bb06df0 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go @@ -1,7 +1,9 @@ package bloomshipper import ( + "archive/tar" "bytes" + "compress/gzip" "context" "encoding/json" "fmt" @@ -13,6 +15,8 @@ import ( "testing" "time" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + aws_io "github.com/aws/smithy-go/io" "github.com/google/uuid" "github.com/prometheus/common/model" @@ -183,6 +187,8 @@ func Test_BloomClient_GetBlocks(t *testing.T) { secondBlockData := createBlockFile(t, secondBlockFullPath) require.FileExists(t, firstBlockFullPath) require.FileExists(t, secondBlockFullPath) + rootDir := filepath.Join(fsNamedStores["folder-1"].Directory, "bloom") + defer os.RemoveAll(rootDir) firstBlockRef := BlockRef{ Ref: Ref{ @@ -212,7 +218,7 @@ func Test_BloomClient_GetBlocks(t *testing.T) { blocksToDownload := []BlockRef{firstBlockRef, secondBlockRef} blocksCh, errorsCh := 
shipper.GetBlocks(context.Background(), blocksToDownload) - blocks := make(map[string]string) + blocks := make(map[string][]byte) func() { timout := time.After(5 * time.Second) for { @@ -226,13 +232,14 @@ func Test_BloomClient_GetBlocks(t *testing.T) { if !ok { return } - blockData, err := io.ReadAll(block.Data) + blockData, err := io.ReadAll(block.BloomData) require.NoError(t, err) - blocks[block.BlockRef.BlockPath] = string(blockData) + blocks[block.BlockRef.BlockPath] = blockData } } }() + defer os.RemoveAll("./bloom") firstBlockActualData, exists := blocks[firstBlockRef.BlockPath] require.Truef(t, exists, "data for the first block must be present in the results: %+v", blocks) @@ -245,9 +252,42 @@ func Test_BloomClient_GetBlocks(t *testing.T) { require.Len(t, blocks, 2) } +func extractFileFromTGZ(tarGzData []byte, targetFileName string) []byte { + gzReader, err := gzip.NewReader(bytes.NewReader(tarGzData)) + if err != nil { + return nil + } + defer gzReader.Close() + + tarReader := tar.NewReader(gzReader) + + for { + header, err := tarReader.Next() + + if err == io.EOF { + break + } + + if err != nil { + return nil + } + + if header.Name == targetFileName { + buffer := new(bytes.Buffer) + if _, err := io.Copy(buffer, tarReader); err != nil { + return nil + } + return buffer.Bytes() + } + } + + return nil +} + func Test_BloomClient_PutBlocks(t *testing.T) { shipper := createShipper(t) blockForFirstFolderData := "data1" + indexForFirstFolderData := "index1" blockForFirstFolder := Block{ BlockRef: BlockRef{ Ref: Ref{ @@ -261,10 +301,12 @@ func Test_BloomClient_PutBlocks(t *testing.T) { }, IndexPath: uuid.New().String(), }, - Data: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForFirstFolderData))}, + BloomData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForFirstFolderData))}, + IndexData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(indexForFirstFolderData))}, } blockForSecondFolderData := "data2" + 
indexForSecondFolderData := "index2" blockForSecondFolder := Block{ BlockRef: BlockRef{ Ref: Ref{ @@ -278,7 +320,8 @@ func Test_BloomClient_PutBlocks(t *testing.T) { }, IndexPath: uuid.New().String(), }, - Data: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForSecondFolderData))}, + BloomData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForSecondFolderData))}, + IndexData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(indexForSecondFolderData))}, } results, err := shipper.PutBlocks(context.Background(), []Block{blockForFirstFolder, blockForSecondFolder}) @@ -300,7 +343,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { require.FileExists(t, savedFilePath) savedData, err := os.ReadFile(savedFilePath) require.NoError(t, err) - require.Equal(t, blockForFirstFolderData, string(savedData)) + require.Equal(t, blockForFirstFolderData, string(extractFileFromTGZ(savedData, "bloom"))) secondResultBlock := results[1] path = secondResultBlock.BlockPath @@ -319,7 +362,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { require.FileExists(t, savedFilePath) savedData, err = os.ReadFile(savedFilePath) require.NoError(t, err) - require.Equal(t, blockForSecondFolderData, string(savedData)) + require.Equal(t, blockForSecondFolderData, string(extractFileFromTGZ(savedData, "bloom"))) } func Test_BloomClient_DeleteBlocks(t *testing.T) { @@ -364,13 +407,19 @@ func Test_BloomClient_DeleteBlocks(t *testing.T) { require.NoFileExists(t, block2Path) } -func createBlockFile(t *testing.T, path string) string { +func createBlockFile(t *testing.T, path string) []byte { err := os.MkdirAll(path[:strings.LastIndex(path, "/")], 0755) require.NoError(t, err) - fileContent := uuid.NewString() - err = os.WriteFile(path, []byte(fileContent), 0700) + bloomContent := []byte(uuid.NewString()) + indexContent := []byte(uuid.NewString()) + outputFile, err := os.Create(path) + require.NoError(t, err) + byteReader := 
v1.NewByteReader(bytes.NewBuffer(indexContent), bytes.NewBuffer(bloomContent)) + err = v1.TarGzMemory(outputFile, byteReader) + require.NoError(t, err) + err = outputFile.Close() require.NoError(t, err) - return fileContent + return bloomContent } func Test_TablesByPeriod(t *testing.T) { diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go index 2df1f41cd4a25..0272a8e4f736a 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go @@ -207,7 +207,8 @@ func (s *Shipper) createBlockQuerier(directory string) *v1.BlockQuerier { } func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, error) { - defer block.Data.Close() + defer block.BloomData.Close() + defer block.IndexData.Close() archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, delimiter)+1:]) archiveFile, err := os.Create(archivePath) @@ -215,7 +216,7 @@ func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, err return "", fmt.Errorf("error creating empty file to store the archiver: %w", err) } defer archiveFile.Close() - _, err = io.Copy(archiveFile, block.Data) + _, err = io.Copy(archiveFile, block.BloomData) if err != nil { return "", fmt.Errorf("error writing data to archive file: %w", err) } diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go index 45450c0e3838b..2f662b2b793d5 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go @@ -245,8 +245,9 @@ func Test_Shipper_extractBlock(t *testing.T) { shipper := Shipper{config: config.Config{WorkingDirectory: workingDir}} ts := time.Now().UTC() block := Block{ - BlockRef: BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"}, - Data: blockFile, + BlockRef: 
BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"}, + BloomData: blockFile, + IndexData: seriesFile, } actualPath, err := shipper.extractBlock(&block, ts) From 02b074d88924e128bbf19902b3eb6846ccef8e75 Mon Sep 17 00:00:00 2001 From: Bilal Khan <64713734+ibilalkayy@users.noreply.github.com> Date: Wed, 22 Nov 2023 19:23:53 +0500 Subject: [PATCH 32/48] Added a new paragraph in the contribution guide about an error because it occurred to me and may occur to others also. (#11131) **What this PR does / why we need it**: This PR contains a paragraph in which there is a remember guide when running the `make docs` command. For some users, it may give an error because they have not added the `/tmp` path into the Docker settings. Without this setting, running the `http://localhost:3002/docs/loki/latest/` URL won't work. That's why I added this guide so it becomes easy for others also. **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. 
[Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --------- Signed-off-by: Bilal Khan Co-authored-by: J Stickler Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> Co-authored-by: Jack Baldry --- CONTRIBUTING.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5226d96ed37c4..b643a46ddf6f9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -149,4 +149,11 @@ To get a local preview of the documentation: 3. Run the command `make docs`. This uses the `grafana/docs` image which internally uses Hugo to generate the static site. 4. Open http://localhost:3002/docs/loki/latest/ to review your changes. +**Remember:** If running `make docs` command gave you the following error. + + - `path /tmp/make-docs.Dcq is not shared from the host and is not known to Docker.` + - `You can configure shared paths from Docker -> Preferences... -> Resources -> File Sharing.` + +Then you can go to Docker Desktop settings and open the resources, add the temporary directory path `/tmp`. + > Note that `make docs` uses a lot of memory. If it crashes, increase the memory allocated to Docker and try again. From b56f36fe6b7f5f581382898dbfc078ad3644f4a9 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Wed, 22 Nov 2023 16:47:35 +0100 Subject: [PATCH 33/48] Fix bloom compactor startup when using unsupported index type (#11285) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR fixes the check in the bloom compactor startup to ignore all other index types than TSDB. 
When updating an existing Loki installation that uses BoltDB Shipper in the schema config, you get the following error: ``` invalid tsdb path: /data/boltdb-shipper-cache/XXXXXXXXX_index_19682/write-0-1639160260677624223-1700595900 ││ create index shipper ││ github.com/grafana/loki/pkg/bloomcompactor.New ││ /src/loki/pkg/bloomcompactor/bloomcompactor.go:159 ││ github.com/grafana/loki/pkg/loki.(*Loki).initBloomCompactor ││ /src/loki/pkg/loki/modules.go:1406 ││ github.com/grafana/dskit/modules.(*Manager).initModule ││ /src/loki/vendor/github.com/grafana/dskit/modules/modules.go:136 ││ github.com/grafana/dskit/modules.(*Manager).InitModuleServices ││ /src/loki/vendor/github.com/grafana/dskit/modules/modules.go:108 ││ github.com/grafana/loki/pkg/loki.(*Loki).Run ``` --------- Signed-off-by: Christian Haudum --- pkg/bloomcompactor/bloomcompactor.go | 18 ++- pkg/bloomcompactor/bloomcompactor_test.go | 133 ++++++++++++++++++++-- pkg/bloomcompactor/sharding.go | 15 +++ pkg/util/ring/sharding.go | 19 ++++ 4 files changed, 163 insertions(+), 22 deletions(-) diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index f004936e10aff..cc11cc3e1ccf9 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -85,6 +85,7 @@ type Compactor struct { sharding ShardingStrategy metrics *metrics + reg prometheus.Registerer } type storeClient struct { @@ -110,6 +111,7 @@ func New( schemaCfg: schemaConfig, sharding: sharding, limits: limits, + reg: r, } // Configure BloomClient for meta.json management @@ -121,14 +123,8 @@ func New( c.storeClients = make(map[config.DayTime]storeClient) for i, periodicConfig := range schemaConfig.Configs { - var indexStorageCfg indexshipper.Config - switch periodicConfig.IndexType { - case config.TSDBType: - indexStorageCfg = storageCfg.TSDBShipperConfig - case config.BoltDBShipperType: - indexStorageCfg = storageCfg.BoltDBShipperConfig.Config - default: - level.Warn(c.logger).Log("msg", 
"skipping period because index type is unsupported") + if periodicConfig.IndexType != config.TSDBType { + level.Warn(c.logger).Log("msg", "skipping schema period because index type is not supported", "index_type", periodicConfig.IndexType, "period", periodicConfig.From) continue } @@ -145,7 +141,7 @@ func New( indexShipper, err := indexshipper.NewIndexShipper( periodicConfig.IndexTables.PathPrefix, - indexStorageCfg, + storageCfg.TSDBShipperConfig, objectClient, limits, nil, @@ -153,7 +149,7 @@ func New( return tsdb.OpenShippableTSDB(p) }, periodicConfig.GetIndexTableNumberRange(periodEndTime), - prometheus.WrapRegistererWithPrefix("loki_tsdb_shipper_", prometheus.DefaultRegisterer), + prometheus.WrapRegistererWithPrefix("loki_tsdb_shipper_", r), logger, ) @@ -355,7 +351,7 @@ func (c *Compactor) compactTenant(ctx context.Context, logger log.Logger, sc sto } // Tokenizer is not thread-safe so we need one per goroutine. - bt, _ := v1.NewBloomTokenizer(prometheus.DefaultRegisterer) + bt, _ := v1.NewBloomTokenizer(c.reg) // TODO: Use ForEachConcurrent? 
errs := multierror.New() diff --git a/pkg/bloomcompactor/bloomcompactor_test.go b/pkg/bloomcompactor/bloomcompactor_test.go index 65c6779750320..6221610321b69 100644 --- a/pkg/bloomcompactor/bloomcompactor_test.go +++ b/pkg/bloomcompactor/bloomcompactor_test.go @@ -8,10 +8,11 @@ import ( "testing" "time" + "github.com/go-kit/log" "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/kv" "github.com/grafana/dskit/kv/consul" "github.com/grafana/dskit/ring" - "github.com/grafana/dskit/server" "github.com/grafana/dskit/services" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -23,7 +24,6 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/client/local" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper" - util_log "github.com/grafana/loki/pkg/util/log" lokiring "github.com/grafana/loki/pkg/util/ring" "github.com/grafana/loki/pkg/validation" ) @@ -33,10 +33,124 @@ const ( workingDirName = "working-dir" ) +func parseDayTime(s string) config.DayTime { + t, err := time.Parse("2006-01-02", s) + if err != nil { + panic(err) + } + return config.DayTime{ + Time: model.TimeFromUnix(t.Unix()), + } +} + +func TestCompactor_StartStopService(t *testing.T) { + shardingStrategy := NewNoopStrategy() + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + + cm := storage.NewClientMetrics() + t.Cleanup(cm.Unregister) + + var limits validation.Limits + limits.RegisterFlags(flag.NewFlagSet("limits", flag.PanicOnError)) + overrides, _ := validation.NewOverrides(limits, nil) + + periodConfigUnsupported := config.PeriodConfig{ + From: parseDayTime("2023-09-01"), + IndexType: config.BoltDBShipperType, + ObjectType: config.StorageTypeFileSystem, + Schema: "v13", + RowShards: 16, + IndexTables: config.IndexPeriodicTableConfig{ + PathPrefix: "index/", + PeriodicTableConfig: config.PeriodicTableConfig{ + Prefix: indexTablePrefix, + Period: 
config.ObjectStorageIndexRequiredPeriod, + }, + }, + } + + periodConfigSupported := config.PeriodConfig{ + From: parseDayTime("2023-10-01"), + IndexType: config.TSDBType, + ObjectType: config.StorageTypeFileSystem, + Schema: "v13", + RowShards: 16, + IndexTables: config.IndexPeriodicTableConfig{ + PathPrefix: "index/", + PeriodicTableConfig: config.PeriodicTableConfig{ + Prefix: indexTablePrefix, + Period: config.ObjectStorageIndexRequiredPeriod, + }, + }, + } + + schemaCfg := config.SchemaConfig{ + Configs: []config.PeriodConfig{ + periodConfigUnsupported, + periodConfigSupported, + }, + } + + fsDir := t.TempDir() + tsdbDir := t.TempDir() + + storageCfg := storage.Config{ + FSConfig: local.FSConfig{ + Directory: fsDir, + }, + TSDBShipperConfig: indexshipper.Config{ + ActiveIndexDirectory: filepath.Join(tsdbDir, "index"), + ResyncInterval: 1 * time.Minute, + Mode: indexshipper.ModeReadWrite, + CacheLocation: filepath.Join(tsdbDir, "cache"), + }, + } + + t.Run("ignore unsupported index types in schema config", func(t *testing.T) { + kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), logger, reg) + t.Cleanup(func() { + closer.Close() + }) + + var cfg Config + flagext.DefaultValues(&cfg) + cfg.Enabled = true + cfg.WorkingDirectory = filepath.Join(t.TempDir(), workingDirName) + cfg.Ring = lokiring.RingConfig{ + KVStore: kv.Config{ + Mock: kvStore, + }, + } + + c, err := New(cfg, storageCfg, schemaCfg, overrides, logger, shardingStrategy, cm, reg) + require.NoError(t, err) + + err = services.StartAndAwaitRunning(context.Background(), c) + require.NoError(t, err) + + require.Equal(t, 1, len(c.storeClients)) + + // supported index type TSDB is present + sc, ok := c.storeClients[periodConfigSupported.From] + require.True(t, ok) + require.NotNil(t, sc) + + // unsupported index type BoltDB is not present + _, ok = c.storeClients[periodConfigUnsupported.From] + require.False(t, ok) + + err = services.StopAndAwaitTerminated(context.Background(), c) + 
require.NoError(t, err) + }) +} + func TestCompactor_RunCompaction(t *testing.T) { - servercfg := &server.Config{} - require.Nil(t, servercfg.LogLevel.Set("debug")) - util_log.InitLogger(servercfg, nil, false) + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + + cm := storage.NewClientMetrics() + t.Cleanup(cm.Unregister) tempDir := t.TempDir() indexDir := filepath.Join(tempDir, "index") @@ -79,7 +193,7 @@ func TestCompactor_RunCompaction(t *testing.T) { ) } - kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), util_log.Logger, nil) + kvStore, cleanUp := consul.NewInMemoryClient(ring.GetCodec(), logger, nil) t.Cleanup(func() { assert.NoError(t, cleanUp.Close()) }) var cfg Config @@ -104,10 +218,7 @@ func TestCompactor_RunCompaction(t *testing.T) { limits.RegisterFlags(flag.NewFlagSet("limits", flag.PanicOnError)) overrides, _ := validation.NewOverrides(limits, nil) - clientMetrics := storage.NewClientMetrics() - t.Cleanup(clientMetrics.Unregister) - - ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, cfg.Ring, 1, 1, util_log.Logger, prometheus.DefaultRegisterer) + ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, cfg.Ring, 1, 1, logger, reg) require.NoError(t, err) err = ringManager.StartAsync(context.Background()) @@ -124,7 +235,7 @@ func TestCompactor_RunCompaction(t *testing.T) { shuffleSharding := NewShuffleShardingStrategy(ringManager.Ring, ringManager.RingLifecycler, overrides) - c, err := New(cfg, storageConfig, schemaCfg, overrides, util_log.Logger, shuffleSharding, clientMetrics, nil) + c, err := New(cfg, storageConfig, schemaCfg, overrides, logger, shuffleSharding, cm, nil) require.NoError(t, err) err = c.runCompaction(context.Background()) diff --git a/pkg/bloomcompactor/sharding.go b/pkg/bloomcompactor/sharding.go index 093c0c3ac9a31..ecbfe06a4c17a 100644 --- a/pkg/bloomcompactor/sharding.go +++ b/pkg/bloomcompactor/sharding.go @@ -41,3 +41,18 @@ func (s 
*ShuffleShardingStrategy) OwnsJob(job Job) (bool, error) { fpSharding := util_ring.NewFingerprintShuffleSharding(tenantRing, s.ringLifeCycler, RingOp) return fpSharding.OwnsFingerprint(uint64(job.Fingerprint())) } + +// NoopStrategy is an implementation of the ShardingStrategy that does not +// filter anything. +type NoopStrategy struct { + util_ring.NoopStrategy +} + +// OwnsJob implements TenantShuffleSharding. +func (s *NoopStrategy) OwnsJob(_ Job) (bool, error) { + return true, nil +} + +func NewNoopStrategy() *NoopStrategy { + return &NoopStrategy{NoopStrategy: util_ring.NoopStrategy{}} +} diff --git a/pkg/util/ring/sharding.go b/pkg/util/ring/sharding.go index cb549ec02bb90..45a53cf40cfe7 100644 --- a/pkg/util/ring/sharding.go +++ b/pkg/util/ring/sharding.go @@ -83,3 +83,22 @@ func (s *FingerprintShuffleSharding) OwnsFingerprint(fp uint64) (bool, error) { return rs.Includes(s.ringLifeCycler.GetInstanceAddr()), nil } + +// NoopStrategy is an implementation of the ShardingStrategy that does not +// shard anything. +type NoopStrategy struct{} + +// OwnsTenant implements TenantShuffleSharding. +func (s *NoopStrategy) OwnsTenant(_ string) bool { + return false +} + +// GetTenantSubRing implements TenantShuffleSharding. +func (s *NoopStrategy) GetTenantSubRing(_ string) ring.ReadRing { + return nil +} + +// OwnsFingerprint implements FingerprintSharding. +func (s *NoopStrategy) OwnsFingerprint(_ uint64) (bool, error) { + return false, nil +} From 146b1bb10b5b33f6122c40ea71b043a879ff1e6a Mon Sep 17 00:00:00 2001 From: Vitor Bruno de Oliveira Barth Date: Wed, 22 Nov 2023 13:43:52 -0300 Subject: [PATCH 34/48] Helm: Update MinIO Helm Chart version to 4.0.15 (#11188) **What this PR does / why we need it**: It bumps the version for the MinIO Helm Chart to v4.0.15. The current MinIO version (4.0.12) has a bug where it throws warnings for incorrect types. ``` coalesce.go:237: warning: skipped value for loki.minio.additionalLabels: Not a table. 
coalesce.go:237: warning: skipped value for loki.minio.additionalAnnotations: Not a table. ``` **Which issue(s) this PR fixes**: None **Special notes for your reviewer**: **Checklist** - [X] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [X] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --------- Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- production/helm/loki/CHANGELOG.md | 4 ++++ production/helm/loki/Chart.lock | 6 +++--- production/helm/loki/Chart.yaml | 4 ++-- production/helm/loki/README.md | 4 ++-- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 51dd2deb2be54..2f52addf52022 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. 
Add other chart versions and respective changelog entries bellow this line.) +## 5.38.0 + +- [CHANGE] Changed MinIO Helm Chart version to 4.0.15 + ## 5.37.0 - [FEATURE] Add support for enabling tracing. diff --git a/production/helm/loki/Chart.lock b/production/helm/loki/Chart.lock index c2bbe88846859..17f1dafad7ae9 100644 --- a/production/helm/loki/Chart.lock +++ b/production/helm/loki/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: minio repository: https://charts.min.io/ - version: 4.0.12 + version: 4.0.15 - name: grafana-agent-operator repository: https://grafana.github.io/helm-charts version: 0.2.16 -digest: sha256:3605bf81141e70309ef7efab98523d59615f3f5cf4e7b2eb7fd2be04cd52c906 -generated: "2023-06-27T16:57:05.871386+02:00" +digest: sha256:56eeb13a669bc816c1452cde5d6dddc61f6893f8aff3da1d2b56ce3bdcbcf84d +generated: "2023-11-09T12:22:25.317696-03:00" diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 39e800d6193e0..2e2d94b49df38 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.2 -version: 5.37.0 +version: 5.38.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki @@ -13,7 +13,7 @@ icon: https://grafana.com/docs/loki/latest/logo_and_name.png dependencies: - name: minio alias: minio - version: 4.0.12 + version: 4.0.15 repository: https://charts.min.io/ condition: minio.enabled - name: grafana-agent-operator diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 7fc83086785b3..76ab849b64463 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.37.0](https://img.shields.io/badge/Version-5.37.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 
2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) +![Version: 5.38.0](https://img.shields.io/badge/Version-5.38.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode @@ -14,7 +14,7 @@ Helm chart for Grafana Loki in simple, scalable mode | Repository | Name | Version | |------------|------|---------| -| https://charts.min.io/ | minio(minio) | 4.0.12 | +| https://charts.min.io/ | minio(minio) | 4.0.15 | | https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.2.16 | Find more information in the Loki Helm Chart [documentation](https://grafana.com/docs/loki/next/installation/helm). From 13b58afdd91dde2d1e68dd430ba4c0c975235d59 Mon Sep 17 00:00:00 2001 From: Bilal Khan <64713734+ibilalkayy@users.noreply.github.com> Date: Thu, 23 Nov 2023 01:45:44 +0500 Subject: [PATCH 35/48] made some typo changes in the caching.md file (#11214) **What this PR does / why we need it**: There were some typo mistakes that I fixed in the **caching.md** file and it was mentioned in the issue that I have given below. 
**Which issue(s) this PR fixes**: Fixes [Issue #9073](https://github.com/grafana/loki/issues/9073) **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) Signed-off-by: Bilal Khan Co-authored-by: J Stickler --- docs/sources/operations/caching.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/operations/caching.md b/docs/sources/operations/caching.md index f46be0e9a8caa..0a20cc3d6e1ff 100644 --- a/docs/sources/operations/caching.md +++ b/docs/sources/operations/caching.md @@ -45,8 +45,8 @@ To enable and configure Memcached: The options `host` and `service` depend on the type of installation. For example, using the `bitnami/memcached` Helm Charts with the following commands, the `service` values are always `memcached`. 
``` - helm upgrade --install chunk-cache -n loki bitnami/memcached -f memcached-overrides.yaml - helm upgrade --install results-cache -n loki bitnami/memcached -f memcached-overrides.yaml + helm upgrade --install chunk-cache -n loki bitnami/memcached -f memcached-overrides-chunk.yaml + helm upgrade --install results-cache -n loki bitnami/memcached -f memcached-overrides-results.yaml ``` The current Helm Chart only supports the chunk and results cache. @@ -57,13 +57,13 @@ To enable and configure Memcached: chunk_cache: enabled: true host: chunk-cache-memcached.loki.svc - service: memcache + service: memcached-client batch_size: 256 parallelism: 10 results_cache: enabled: true host: results-cache-memcached.loki.svc - service: memcache + service: memcached-client default_validity: 12h ``` 1. If the Loki configuration is used, modify the following three sections in From f3b32023eefd4f57e9a2b95ea2ca01cc582707c2 Mon Sep 17 00:00:00 2001 From: Shashwat Pathak Date: Thu, 23 Nov 2023 02:38:28 +0530 Subject: [PATCH 36/48] Update _index.md (#11265) Replacing the invalid (404) promtail-ec2.yaml with the valid one. **What this PR does / why we need it**: This PR does - updates the invalid url of the promtail-ec2.yaml with the valid one. Why it is needed - [While implementing this doc](https://grafana.com/docs/loki/latest/send-data/promtail/cloud/ec2/), the curl won't work. 
**Which issue(s) this PR fixes**: Fixes # No issue has been raised for this **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) Co-authored-by: J Stickler --- docs/sources/send-data/promtail/cloud/ec2/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/send-data/promtail/cloud/ec2/_index.md b/docs/sources/send-data/promtail/cloud/ec2/_index.md index 0ae942ab4c615..18434d734bac5 100644 --- a/docs/sources/send-data/promtail/cloud/ec2/_index.md +++ b/docs/sources/send-data/promtail/cloud/ec2/_index.md @@ -95,7 +95,7 @@ Now we're going to download the [Promtail configuration]({{< relref "../../../.. The file is also available as a gist at [cyriltovena/promtail-ec2.yaml][config gist]. 
```bash -curl https://raw.githubusercontent.com/grafana/loki/main/docs/sources/clients/aws/ec2/promtail-ec2.yaml > ec2-promtail.yaml +curl https://raw.githubusercontent.com/grafana/loki/main/docs/sources/send-data/promtail/cloud/ec2/promtail-ec2.yaml > ec2-promtail.yaml vi ec2-promtail.yaml ``` From 1d90a8b95660ddba52d7e0c482d7e553d0801c29 Mon Sep 17 00:00:00 2001 From: Poyzan <31743851+poyzannur@users.noreply.github.com> Date: Thu, 23 Nov 2023 05:21:04 +0000 Subject: [PATCH 37/48] Fix bloom compactor startup duplicate metric registration (#11300) **What this PR does / why we need it**: Follow up from https://github.com/grafana/loki/pull/11285/, where Loki doesn't start up due to duplicate metric registration. ``` 2023-11-22 22:43:32 /src/loki/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go:276 +0x178 2023-11-22 22:43:32 github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/downloads.newMetrics({0x2a62040, 0x4001baa660}) 2023-11-22 22:43:32 /src/loki/pkg/storage/stores/shipper/indexshipper/downloads/metrics.go:21 +0xa4 2023-11-22 22:43:32 github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/downloads.NewTableManager({{0x4000cdae58, 0x18}, 0x45d964b800, 0x4e94914f0000, 0x0, {0xffff4f32e908, 0x40009c6750}}, 0x24192e0, {0x2a89f50?, 0x40004e7320}, ...) 2023-11-22 22:43:32 /src/loki/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go:92 +0xa8 2023-11-22 22:43:32 github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper.(*indexShipper).init(0x4001b9a480, {0x231b106?, 0x0?}, {0x2a80bf8?, 0x40018b9340?}, {0xffff4f32e908, 0x40009c6750}, 0x0?, {0x0?, 0x0?, ...}, ...) 2023-11-22 22:43:32 /src/loki/pkg/storage/stores/shipper/indexshipper/shipper.go:190 +0x28c 2023-11-22 22:43:32 github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper.NewIndexShipper({_, _}, {{0x4000685900, 0x19}, {0x4000cdae58, 0x18}, 0x4e94914f0000, 0x45d964b800, 0x0, {{0x2313cd3, ...}, ...}, ...}, ...) 
2023-11-22 22:43:32 /src/loki/pkg/storage/stores/shipper/indexshipper/shipper.go:155 +0x194 2023-11-22 22:43:32 github.com/grafana/loki/pkg/bloomcompactor.New({{{{0x232535d, 0xa}, {0x232789e, 0xb}, {{...}, {...}, {...}, _}, {_, _}}, ...}, ...}, ...) 2023-11-22 22:43:32 /src/loki/pkg/bloomcompactor/bloomcompactor.go:142 +0x6d4 2023-11-22 22:43:32 github.com/grafana/loki/pkg/loki.(*Loki).initBloomCompactor(0x40006dc000) 2023-11-22 22:43:32 /src/loki/pkg/loki/modules.go:1404 +0x358 2023-11-22 22:43:32 github.com/grafana/dskit/modules.(*Manager).initModule(0x4000b48318, {0xffffe15d6f7d, 0x4}, 0x403b88?, 0x2313cb3?) 2023-11-22 22:43:32 /src/loki/vendor/github.com/grafana/dskit/modules/modules.go:136 +0x1a4 2023-11-22 22:43:32 github.com/grafana/dskit/modules.(*Manager).InitModuleServices(0x4000b1a718?, {0x400071c520, 0x1, 0x4000946ec0?}) 2023-11-22 22:43:32 /src/loki/vendor/github.com/grafana/dskit/modules/modules.go:108 +0xb4 2023-11-22 22:43:32 github.com/grafana/loki/pkg/loki.(*Loki).Run(0x40006dc000, {0x0?, {0x4?, 0x3?, 0x3ff40c0?}}) 2023-11-22 22:43:32 /src/loki/pkg/loki/loki.go:416 +0x74 2023-11-22 22:43:32 main.main() 2023-11-22 22:43:32 /src/loki/cmd/loki/main.go:114 +0xcdc 2023-11-22 22:43:33 panic: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "loki_tsdb_shipper_query_time_table_download_duration_seconds", help: "Time (in seconds) spent in downloading of files per table at query time", constLabels: {}, variableLabels: [{table }]} has different label names or a different help string ``` Plus a sneaky fix to our loki config, removing a deprecated field. 
See deprecation clean up here: https://github.com/grafana/loki/pull/11038/files **Which issue(s) this PR fixes**: Fixes # **Special notes for your reviewer**: **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [ ] Tests updated - [ ] `CHANGELOG.md` updated - [ ] If the change is worth mentioning in the release notes, add `add-to-release-notes` label - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. 
[Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --- pkg/bloomcompactor/bloomcompactor.go | 2 +- production/docker/config/loki.yaml | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index cc11cc3e1ccf9..ccc331412e3bb 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -149,7 +149,7 @@ func New( return tsdb.OpenShippableTSDB(p) }, periodicConfig.GetIndexTableNumberRange(periodEndTime), - prometheus.WrapRegistererWithPrefix("loki_tsdb_shipper_", r), + prometheus.WrapRegistererWithPrefix("loki_bloom_compactor_tsdb_shipper_", r), logger, ) diff --git a/production/docker/config/loki.yaml b/production/docker/config/loki.yaml index e6a2f5fe31d84..6e4541164a235 100644 --- a/production/docker/config/loki.yaml +++ b/production/docker/config/loki.yaml @@ -97,9 +97,6 @@ limits_config: split_queries_by_interval: 15m volume_enabled: true -chunk_store_config: - max_look_back_period: 336h - table_manager: retention_deletes_enabled: true retention_period: 336h From 10a21cfe9bdfce3cab1eddbf0dc2d4673a0cbc2d Mon Sep 17 00:00:00 2001 From: J Stickler Date: Thu, 23 Nov 2023 02:22:43 -0500 Subject: [PATCH 38/48] [Docs] Update Helm installation to include backend (#11191) **What this PR does / why we need it**: Updates the Helm Charts documentation to include `backend` target Fixes broken links **Which issue(s) this PR fixes**: Fixes #11189 **Special notes for your reviewer**: I walked through this with @monodot earlier today. 
Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- docs/sources/send-data/promtail/pipelines.md | 2 +- .../send-data/promtail/stages/match.md | 2 +- .../install/helm/install-scalable/_index.md | 109 ++++++++++-------- 3 files changed, 63 insertions(+), 50 deletions(-) diff --git a/docs/sources/send-data/promtail/pipelines.md b/docs/sources/send-data/promtail/pipelines.md index 356e0ab4eec45..cee217a88987a 100644 --- a/docs/sources/send-data/promtail/pipelines.md +++ b/docs/sources/send-data/promtail/pipelines.md @@ -203,5 +203,5 @@ given log entry. ## Stages -Refer to the [Promtail Stages Configuration Reference]({{< relref "./stages/_index.md#prometheus-pipeline-stages" >}}) for the +Refer to the [Promtail Stages Configuration Reference]({{< relref "./stages/_index.md#promtail-pipeline-stages" >}}) for the schema on the various supported stages supported. diff --git a/docs/sources/send-data/promtail/stages/match.md b/docs/sources/send-data/promtail/stages/match.md index 5259395ab64f3..e9351350f4b3c 100644 --- a/docs/sources/send-data/promtail/stages/match.md +++ b/docs/sources/send-data/promtail/stages/match.md @@ -48,7 +48,7 @@ match: [...] ``` -Refer to the [Promtail Stages Configuration Reference]({{< relref "./_index.md#prometheus-pipeline-stages" >}}) for the +Refer to the [Promtail Stages Configuration Reference]({{< relref "./_index.md#promtail-pipeline-stages" >}}) for the schema on the various stages supported here. ### Example diff --git a/docs/sources/setup/install/helm/install-scalable/_index.md b/docs/sources/setup/install/helm/install-scalable/_index.md index 3abd69fd8f752..b0dbec64bf618 100644 --- a/docs/sources/setup/install/helm/install-scalable/_index.md +++ b/docs/sources/setup/install/helm/install-scalable/_index.md @@ -13,70 +13,83 @@ keywords: -This Helm Chart installation runs the Grafana Loki cluster within a Kubernetes cluster. +This Helm Chart deploys Grafana Loki on Kubernetes. 
-If object storge is configured, this chart configures Loki to run `read` and `write` targets in a [scalable mode]({{< relref "../../../../get-started/deployment-modes#simple-scalable" >}}), highly available architecture (3 replicas of each) designed to work with AWS S3 object storage. It will also configure meta-monitoring of metrics and logs. +This chart configures Loki to run `read`, `write`, and `backend` targets in a [scalable mode]({{< relref "../../../../get-started/deployment-modes#simple-scalable" >}}), highly available architecture designed to work with AWS S3 object storage. The chart also supports self-monitoring or meta-monitoring by deploying Grafana Agent to monitor Loki itself, by scraping its metrics and logs. -It is not possible to run the scalable mode with the `filesystem` storage. +The default Helm chart deploys the following components: +- Read component (3 replicas) +- Write component (3 replicas) +- Backend component (3 replicas) +- Loki Canary (1 DaemonSet) +- Gateway (1 NGINX replica) +- Minio (optional, if `minio.enabled=true`) +- Grafana Agent Operator + Grafana Agent (1 DaemonSet) - configured to monitor the Loki application. -**Before you begin:** + + +It is not recommended to run scalable mode with `filesystem` storage. + +**Prerequisites** - Helm 3 or above. See [Installing Helm](https://helm.sh/docs/intro/install/). - A running Kubernetes cluster. -- A Prometheus operator installation in case meta-monitoring should be used. -- Optionally a Memcached deployment for better performance. Consult the [caching section]({{< relref "../../../../operations/caching" >}}) on how to configure Memcached. +- (Optional) A Memcached deployment for better query performance. For information on configuring Memcached, refer to [caching section]({{< relref "../../../../operations/caching" >}}). + + +**To deploy Loki in simple scalable mode:** -**To deploy Loki in scalable mode:** 1. 
Add [Grafana's chart repository](https://github.com/grafana/helm-charts) to Helm: - ```bash - helm repo add grafana https://grafana.github.io/helm-charts - ``` + ```bash + helm repo add grafana https://grafana.github.io/helm-charts + ``` 1. Update the chart repository: - ```bash - helm repo update - ``` + ```bash + helm repo update + ``` 1. Configure the object storage: - - Create the configuration file `values.yaml`. The example below illustrates a s3 configuration: - - ```yaml - loki: - storage: - bucketNames: - chunks: chunks - ruler: ruler - admin: admin - type: s3 - s3: - endpoint: - region: - secretAccessKey: - accessKeyId: - s3ForcePathStyle: false - insecure: false - ``` - - Consult the [Reference]({{< relref "../reference" >}}) for configuring other storage providers. - - - If you're just trying things, you can use the following configuration instead, that sets MinIO as storage: - ```yaml - minio: - enabled: true - ``` + - Create the configuration file `values.yaml`. The example below illustrates a s3 configuration: + + ```yaml + loki: + storage: + bucketNames: + chunks: chunks + ruler: ruler + admin: admin + type: s3 + s3: + endpoint: + region: + secretAccessKey: + accessKeyId: + s3ForcePathStyle: false + insecure: false + ``` + + To configure other storage providers, refer to the [Helm Chart Reference]({{< relref "../reference" >}}). + + - If you're just trying things, you can use the following configuration, that sets MinIO as storage: + ```yaml + minio: + enabled: true + ``` 1. Install or upgrade the Loki deployment. - - To install: - - ```bash - helm install --values values.yaml loki grafana/loki - ``` - - To upgrade: + - To install: ```bash - helm upgrade --values values.yaml loki grafana/loki - ``` - + helm install --values values.yaml loki grafana/loki + ``` + - To upgrade: + ```bash + helm upgrade --values values.yaml loki grafana/loki + ``` + +## Next Steps +Configure an agent to [send log data to Loki](/docs/loki/latest/send-data/). 
From a971dcb7add60ab6d3b664bbf317b29b7c0d4ffb Mon Sep 17 00:00:00 2001 From: Jan Wozniak Date: Thu, 23 Nov 2023 11:27:37 +0100 Subject: [PATCH 39/48] Document caveats for 2.x to 3.x loki helm chart upgrades (#11199) **What this PR does / why we need it**: https://github.com/grafana/loki/issues/9427 discusses a few notable gotchas and differences between `v2.x` and `v3.x` helm charts. In this PR, I would like to propose aggregation of the information from the mentioned issue and appending that to the upgrade guide. **Which issue(s) this PR fixes**: see also: https://github.com/grafana/loki/issues/9427 **Special notes for your reviewer**: **Checklist** - [x] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [x] Documentation added - [x] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` --------- Signed-off-by: Jan Wozniak Co-authored-by: J Stickler Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- .../setup/upgrade/upgrade-from-2x/index.md | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/sources/setup/upgrade/upgrade-from-2x/index.md b/docs/sources/setup/upgrade/upgrade-from-2x/index.md index 94b37a952cfc2..38c4756c00b1d 100644 --- a/docs/sources/setup/upgrade/upgrade-from-2x/index.md +++ b/docs/sources/setup/upgrade/upgrade-from-2x/index.md @@ -47,6 +47,28 @@ helm upgrade loki grafana/loki \ You will need to manually delete the existing stateful set for the above command to work. +#### Notable changes + +The `grafana/loki` chart used `Secret` as storage for configuration. You can set `.loki.existingSecretForConfig` to continue using `Secret` or migrate your configuration to a `ConfigMap`. Specifying the Loki config in `values.yaml` is still available. 
In the old chart it was under `.config`, the new chart allows specifying either `.loki.config` or `.loki.structuredConfig` which takes precedence. + +Similarly when using `extraVolumes`, the configuration is now nested under `.singleBinary.extraVolumes` or `.read.extraVolumes` + `.write.extraVolumes` if you decide to migrate to the Loki scalable deployment mode. + +#### Dependencies + +The `grafana/loki` chart was only used to install Loki. New charts since `v3.x` also bundle two dependencies - **minio** and **grafana-agent-operator**. If you have already installed either of these independently and wish to continue managing them separately, you can explicitly disable these dependencies in your `values.yaml` as shown in the following examples: +```yaml +minio: + enabled: false +``` + +```yaml +monitoring: + selfMonitoring: + enabled: false + grafanaAgent: + installOperator: false +``` + ### Upgrading from `grafana/loki-simple-scalable` As this chart is largely based off the `grafana/loki-simple-scalable` chart, you should be able to use your existing `values.yaml` file and just upgrade to the new chart name. 
For example, if you installed the `grafana/loki-simple-scalable` chart as `loki` in the namespace `loki`, your upgrade would be: From 21a07773ba23bfe2005ac4717c6a4612de486218 Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Thu, 23 Nov 2023 12:40:25 +0100 Subject: [PATCH 40/48] operator: Add automatic stream sharding support (#11091) Co-authored-by: Robert Jacob --- operator/CHANGELOG.md | 1 + operator/apis/loki/v1/lokistack_types.go | 8 + .../loki-operator.clusterserviceversion.yaml | 14 +- .../loki.grafana.com_lokistacks.yaml | 12 + .../loki-operator.clusterserviceversion.yaml | 14 +- .../loki.grafana.com_lokistacks.yaml | 12 + .../loki-operator.clusterserviceversion.yaml | 14 +- .../loki.grafana.com_lokistacks.yaml | 12 + .../bases/loki.grafana.com_lokistacks.yaml | 12 + .../loki-operator.clusterserviceversion.yaml | 12 + .../loki-operator.clusterserviceversion.yaml | 12 + .../loki-operator.clusterserviceversion.yaml | 12 + operator/docs/operator/api.md | 13 + .../manifests/internal/config/build_test.go | 398 ++++++++++++++++-- .../internal/config/loki-config.yaml | 5 + .../internal/config/loki-runtime-config.yaml | 5 + operator/internal/manifests/internal/sizes.go | 12 +- 17 files changed, 518 insertions(+), 50 deletions(-) diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 8ee1f64ed48f5..5728306b7f9f9 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [11091](https://github.com/grafana/loki/pull/11091) **periklis**: Add automatic stream sharding support - [11022](https://github.com/grafana/loki/pull/11022) **JoaoBraveCoding**: Remove outdated BoltDB dashboards - [10932](https://github.com/grafana/loki/pull/10932) **JoaoBraveCoding**: Adds new value v13 to schema - [11232](https://github.com/grafana/loki/pull/11232) **periklis**: Update dependencies and dev tools diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go index 382b6f1795d85..29e09887b3dfc 100644 
--- a/operator/apis/loki/v1/lokistack_types.go +++ b/operator/apis/loki/v1/lokistack_types.go @@ -690,6 +690,14 @@ type IngestionLimitSpec struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Max Line Size" MaxLineSize int32 `json:"maxLineSize,omitempty"` + // PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should + // target applying automatic stream sharding. Units MB. + // + // +optional + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Per Stream Desired Rate (in MB)" + PerStreamDesiredRate int32 `json:"perStreamDesiredRate,omitempty"` + // PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. // // +optional diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index 8a315a261bd09..c432c376acca6 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.5.0 - createdAt: "2023-11-03T11:44:16Z" + createdAt: "2023-11-23T11:25:33Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -336,6 +336,12 @@ spec: path: limits.global.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. 
Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.global.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) @@ -420,6 +426,12 @@ spec: path: limits.tenants.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.tenants.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml index 11d653fdd332f..267d1e292a4c0 100644 --- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -135,6 +135,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. @@ -262,6 +268,12 @@ spec: on ingestion path. Units in Bytes. 
format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index 08b6919daab9a..9264955ed23fc 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.5.0 - createdAt: "2023-11-03T11:44:14Z" + createdAt: "2023-11-23T11:25:30Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown @@ -336,6 +336,12 @@ spec: path: limits.global.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.global.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. 
displayName: Maximum byte rate per second per stream (in MB) @@ -420,6 +426,12 @@ spec: path: limits.tenants.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.tenants.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml index 5e77c9ba4f1ac..5e21aa0b11d5a 100644 --- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml @@ -135,6 +135,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. @@ -262,6 +268,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. 
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index f9828c5051644..2139a813749ac 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2023-11-03T11:44:18Z" + createdAt: "2023-11-23T11:25:35Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements @@ -349,6 +349,12 @@ spec: path: limits.global.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.global.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) @@ -433,6 +439,12 @@ spec: path: limits.tenants.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.tenants.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. 
displayName: Maximum byte rate per second per stream (in MB) diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml index def6c0ed0777d..087fd1a97cddf 100644 --- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml +++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml @@ -135,6 +135,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. @@ -262,6 +268,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml index 56c33d835cd7b..199836ca130e3 100644 --- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml +++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml @@ -117,6 +117,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. 
@@ -244,6 +250,12 @@ spec: on ingestion path. Units in Bytes. format: int32 type: integer + perStreamDesiredRate: + description: PerStreamDesiredRate defines the desired + ingestion rate per second that LokiStack should target + applying automatic stream sharding. Units MB. + format: int32 + type: integer perStreamRateLimit: description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml index 71af0623e6223..a8db185fa67ae 100644 --- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml @@ -249,6 +249,12 @@ spec: path: limits.global.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.global.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) @@ -333,6 +339,12 @@ spec: path: limits.tenants.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. 
+ displayName: Per Stream Desired Rate (in MB) + path: limits.tenants.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml index a3ffabdea5e83..49f5cdcb809d1 100644 --- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml @@ -249,6 +249,12 @@ spec: path: limits.global.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.global.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) @@ -333,6 +339,12 @@ spec: path: limits.tenants.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.tenants.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. 
displayName: Maximum byte rate per second per stream (in MB) diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml index ab3143fc2b60e..9455549c3b40e 100644 --- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml @@ -261,6 +261,12 @@ spec: path: limits.global.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.global.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) @@ -345,6 +351,12 @@ spec: path: limits.tenants.ingestion.maxLineSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number + - description: PerStreamDesiredRate defines the desired ingestion rate per second + that LokiStack should target applying automatic stream sharding. Units MB. + displayName: Per Stream Desired Rate (in MB) + path: limits.tenants.ingestion.perStreamDesiredRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number - description: PerStreamRateLimit defines the maximum byte rate per second per stream. Units MB. displayName: Maximum byte rate per second per stream (in MB) diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md index 20e4b0d6adc2a..e415bc9daa214 100644 --- a/operator/docs/operator/api.md +++ b/operator/docs/operator/api.md @@ -1172,6 +1172,19 @@ int32 +perStreamDesiredRate
+ +int32 + + + +(Optional) +

PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should +target applying automatic stream sharding. Units MB.

+ + + + perStreamRateLimit
int32 diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go index 8b8ec4a2de156..fd254b7ee1e9d 100644 --- a/operator/internal/manifests/internal/config/build_test.go +++ b/operator/internal/manifests/internal/config/build_test.go @@ -108,10 +108,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -193,8 +196,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -363,10 +367,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -453,8 +460,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -569,8 +577,9 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) { MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, 
PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, // making it nil so that the template is not generated and error is returned QueryLimits: nil, @@ -728,10 +737,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -867,8 +879,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -1084,10 +1097,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -1223,8 +1239,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -1441,10 +1458,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB 
allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -1593,8 +1613,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -1836,10 +1857,13 @@ limits_config: priority: 1 period: 3d max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -1931,8 +1955,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -2160,10 +2185,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 2m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -2325,8 +2353,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -2598,10 +2627,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 
15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -2720,8 +2752,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -2921,10 +2954,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 2m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -3114,8 +3150,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -3416,10 +3453,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -3502,8 +3542,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ 
MaxEntriesLimitPerQuery: 5000, @@ -3675,10 +3716,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -3761,8 +3805,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -3935,10 +3980,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -4020,8 +4068,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -4196,10 +4245,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -4286,8 +4338,9 @@ overrides: 
MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -4488,10 +4541,13 @@ limits_config: cardinality_limit: 100000 max_streams_matchers_per_query: 1000 max_cache_freshness_per_query: 10m - per_stream_rate_limit: 3MB - per_stream_rate_limit_burst: 15MB split_queries_by_interval: 30m query_timeout: 1m + per_stream_rate_limit: 5MB + per_stream_rate_limit_burst: 15MB + shard_streams: + enabled: true + desired_rate: 3MB allow_structured_metadata: true memberlist: abort_if_cluster_join_fails: true @@ -4578,8 +4634,9 @@ overrides: MaxLabelNamesPerSeries: 30, MaxGlobalStreamsPerTenant: 0, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, + PerStreamDesiredRate: 3, }, QueryLimits: &lokiv1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 5000, @@ -4683,6 +4740,261 @@ overrides: require.YAMLEq(t, expRCfg, string(rCfg)) } +func TestBuild_ConfigAndRuntimeConfig_WithManualPerStreamRateLimits(t *testing.T) { + expCfg := ` +--- +auth_enabled: true +chunk_store_config: + chunk_cache_config: + embedded_cache: + enabled: true + max_size_mb: 500 +common: + storage: + s3: + s3: http://test.default.svc.cluster.local.:9000 + bucketnames: loki + region: us-east + access_key_id: test + secret_access_key: test123 + s3forcepathstyle: true + compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095 + ring: + kvstore: + store: memberlist + heartbeat_period: 5s + heartbeat_timeout: 1m + instance_port: 9095 +compactor: + compaction_interval: 2h + working_directory: /tmp/loki/compactor +frontend: + tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 + compress_responses: true + max_outstanding_per_tenant: 4096 + log_queries_longer_than: 5s +frontend_worker: + frontend_address: 
loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095 + grpc_client_config: + max_send_msg_size: 104857600 + match_max_concurrent: true +ingester: + chunk_block_size: 262144 + chunk_encoding: snappy + chunk_idle_period: 1h + chunk_retain_period: 5m + chunk_target_size: 2097152 + flush_op_timeout: 10m + lifecycler: + final_sleep: 0s + join_after: 30s + num_tokens: 512 + ring: + replication_factor: 1 + max_chunk_age: 2h + max_transfer_retries: 0 + wal: + enabled: true + dir: /tmp/wal + replay_memory_ceiling: 2500 +ingester_client: + grpc_client_config: + max_recv_msg_size: 67108864 + remote_timeout: 1s +# NOTE: Keep the order of keys as in Loki docs +# to enable easy diffs when vendoring newer +# Loki releases. +# (See https://grafana.com/docs/loki/latest/configuration/#limits_config) +# +# Values for not exposed fields are taken from the grafana/loki production +# configuration manifests. +# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet) +limits_config: + ingestion_rate_strategy: global + ingestion_rate_mb: 4 + ingestion_burst_size_mb: 6 + max_label_name_length: 1024 + max_label_value_length: 2048 + max_label_names_per_series: 30 + reject_old_samples: true + reject_old_samples_max_age: 168h + creation_grace_period: 10m + enforce_metric_name: false + # Keep max_streams_per_user always to 0 to default + # using max_global_streams_per_user always. 
+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73) + max_streams_per_user: 0 + max_line_size: 256000 + max_entries_limit_per_query: 5000 + max_global_streams_per_user: 0 + max_chunks_per_query: 2000000 + max_query_length: 721h + max_query_parallelism: 32 + max_query_series: 500 + cardinality_limit: 100000 + max_streams_matchers_per_query: 1000 + max_cache_freshness_per_query: 10m + per_stream_rate_limit: 3MB + per_stream_rate_limit_burst: 15MB + split_queries_by_interval: 30m + tsdb_max_query_parallelism: 512 + query_timeout: 1m + allow_structured_metadata: true +memberlist: + abort_if_cluster_join_fails: true + advertise_port: 7946 + bind_port: 7946 + join_members: + - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946 + max_join_backoff: 1m + max_join_retries: 10 + min_join_backoff: 1s +querier: + engine: + max_look_back_period: 30s + extra_query_delay: 0s + max_concurrent: 2 + query_ingesters_within: 3h + tail_max_duration: 1h +query_range: + align_queries_with_step: true + cache_results: true + max_retries: 5 + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 500 + parallelise_shardable_queries: true +schema_config: + configs: + - from: "2020-10-01" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v11 + store: boltdb-shipper +server: + graceful_shutdown_timeout: 5s + grpc_server_min_time_between_pings: '10s' + grpc_server_ping_without_stream_allowed: true + grpc_server_max_concurrent_streams: 1000 + grpc_server_max_recv_msg_size: 104857600 + grpc_server_max_send_msg_size: 104857600 + http_listen_port: 3100 + http_server_idle_timeout: 30s + http_server_read_timeout: 30s + http_server_write_timeout: 10m0s + log_level: info +storage_config: + boltdb_shipper: + active_index_directory: /tmp/loki/index + cache_location: /tmp/loki/index_cache + cache_ttl: 24h + resync_interval: 5m + shared_store: s3 + index_gateway_client: + server_address: 
dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095 +tracing: + enabled: false +analytics: + reporting_enabled: true +` + expRCfg := ` +--- +overrides: +` + opts := Options{ + Stack: lokiv1.LokiStackSpec{ + Replication: &lokiv1.ReplicationSpec{ + Factor: 1, + }, + Limits: &lokiv1.LimitsSpec{ + Global: &lokiv1.LimitsTemplateSpec{ + IngestionLimits: &lokiv1.IngestionLimitSpec{ + IngestionRate: 4, + IngestionBurstSize: 6, + MaxLabelNameLength: 1024, + MaxLabelValueLength: 2048, + MaxLabelNamesPerSeries: 30, + MaxGlobalStreamsPerTenant: 0, + MaxLineSize: 256000, + PerStreamRateLimit: 3, + PerStreamRateLimitBurst: 15, + }, + QueryLimits: &lokiv1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 5000, + MaxChunksPerQuery: 2000000, + MaxQuerySeries: 500, + QueryTimeout: "1m", + CardinalityLimit: 100000, + }, + }, + }, + }, + Namespace: "test-ns", + Name: "test", + Compactor: Address{ + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + FrontendWorker: Address{ + FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + GossipRing: GossipRing{ + InstancePort: 9095, + BindPort: 7946, + MembersDiscoveryAddr: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local", + }, + Querier: Address{ + Protocol: "http", + FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local", + Port: 3100, + }, + IndexGateway: Address{ + FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, + }, + StorageDirectory: "/tmp/loki", + MaxConcurrent: MaxConcurrent{ + AvailableQuerierCPUCores: 2, + }, + WriteAheadLog: WriteAheadLog{ + Directory: "/tmp/wal", + IngesterMemoryRequest: 5000, + }, + ObjectStorage: storage.Options{ + SharedStore: lokiv1.ObjectStorageSecretS3, + S3: &storage.S3StorageConfig{ + Endpoint: "http://test.default.svc.cluster.local.:9000", + Region: "us-east", + Buckets: "loki", + AccessKeyID: "test", + AccessKeySecret: "test123", + }, + Schemas: 
[]lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-01", + }, + }, + }, + Shippers: []string{"boltdb"}, + EnableRemoteReporting: true, + HTTPTimeouts: HTTPTimeoutConfig{ + IdleTimeout: 30 * time.Second, + ReadTimeout: 30 * time.Second, + WriteTimeout: 10 * time.Minute, + }, + } + cfg, rCfg, err := Build(opts) + require.NoError(t, err) + require.YAMLEq(t, expCfg, string(cfg)) + require.YAMLEq(t, expRCfg, string(rCfg)) +} + func defaultOptions() Options { return Options{ Stack: lokiv1.LokiStackSpec{ diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml index e192083ab9900..0ea4348367835 100644 --- a/operator/internal/manifests/internal/config/loki-config.yaml +++ b/operator/internal/manifests/internal/config/loki-config.yaml @@ -204,6 +204,11 @@ limits_config: per_stream_rate_limit: {{ .Stack.Limits.Global.IngestionLimits.PerStreamRateLimit }}MB per_stream_rate_limit_burst: {{ .Stack.Limits.Global.IngestionLimits.PerStreamRateLimitBurst }}MB split_queries_by_interval: 30m +{{- with .Stack.Limits.Global.IngestionLimits.PerStreamDesiredRate }} + shard_streams: + enabled: true + desired_rate: {{ . }}MB +{{- end }} allow_structured_metadata: true {{- with .GossipRing }} memberlist: diff --git a/operator/internal/manifests/internal/config/loki-runtime-config.yaml b/operator/internal/manifests/internal/config/loki-runtime-config.yaml index ca62d0a783db9..8b2fe60b23f8e 100644 --- a/operator/internal/manifests/internal/config/loki-runtime-config.yaml +++ b/operator/internal/manifests/internal/config/loki-runtime-config.yaml @@ -32,6 +32,11 @@ overrides: {{- if $l.PerStreamRateLimitBurst }} per_stream_rate_limit_burst: {{ $l.PerStreamRateLimitBurst }}MB {{- end }} + {{- with $l.PerStreamDesiredRate }} + shard_streams: + enabled: true + desired_rate: {{ . 
}}MB + {{- end}} {{- end -}} {{- if $l := $spec.QueryLimits -}} {{- if $l.MaxEntriesLimitPerQuery }} diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go index 01c0f20eea6da..be5ac2eefb018 100644 --- a/operator/internal/manifests/internal/sizes.go +++ b/operator/internal/manifests/internal/sizes.go @@ -241,7 +241,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxLabelValueLength: 2048, MaxLabelNamesPerSeries: 30, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamDesiredRate: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, }, QueryLimits: &lokiv1.QueryLimitSpec{ @@ -296,7 +297,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxLabelValueLength: 2048, MaxLabelNamesPerSeries: 30, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamDesiredRate: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, }, QueryLimits: &lokiv1.QueryLimitSpec{ @@ -354,7 +356,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxLabelValueLength: 2048, MaxLabelNamesPerSeries: 30, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamDesiredRate: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, }, QueryLimits: &lokiv1.QueryLimitSpec{ @@ -412,7 +415,8 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxLabelValueLength: 2048, MaxLabelNamesPerSeries: 30, MaxLineSize: 256000, - PerStreamRateLimit: 3, + PerStreamDesiredRate: 3, + PerStreamRateLimit: 5, PerStreamRateLimitBurst: 15, }, QueryLimits: &lokiv1.QueryLimitSpec{ From 7200c4b95c163bdad4d5099d62b8f0910b93847a Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Thu, 23 Nov 2023 15:11:35 +0100 Subject: [PATCH 41/48] Per-tenant n-gram length and skip factor, and bloom false-positive rate (#11290) **What this PR does / why we need it**: This PR adds three per tenant configs: - `bloom_ngram_length`: Configures the n-gram length. 
- `bloom_ngram_skip`: Configures the n-gram skip factor. - `bloom_false_positive_rate`: Configures the target false-positive rate of the scalable bloom filters. Since the n-gram length and skip factor are now configurable, these values are written into the block metadata so queriers can use the n-gram settings used when creating the blocks to build n-grams compatible with the block. --- docs/sources/configure/_index.md | 12 +++++++ pkg/bloomcompactor/TODO.md | 2 -- pkg/bloomcompactor/bloomcompactor.go | 35 +++++++++++++++----- pkg/bloomcompactor/config.go | 3 ++ pkg/bloomcompactor/sharding_test.go | 17 ++-------- pkg/storage/bloom/v1/bloom_tokenizer.go | 14 +++++--- pkg/storage/bloom/v1/bloom_tokenizer_test.go | 16 +++++---- pkg/storage/bloom/v1/builder.go | 6 ++-- pkg/storage/bloom/v1/builder_test.go | 16 ++++++--- pkg/storage/bloom/v1/index.go | 15 ++++++--- pkg/validation/limits.go | 18 ++++++++++ tools/tsdb/bloom-tester/lib.go | 11 ++++-- tools/tsdb/bloom-tester/readlib.go | 16 ++++----- 13 files changed, 124 insertions(+), 57 deletions(-) diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index f6fb4aefe9182..5c3dba3d15de2 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -2978,6 +2978,18 @@ shard_streams: # CLI flag: -bloom-compactor.enable-compaction [bloom_compactor_enable_compaction: | default = false] +# Length of the n-grams created when computing blooms from log lines. +# CLI flag: -bloom-compactor.ngram-length +[bloom_ngram_length: | default = 4] + +# Skip factor for the n-grams created when computing blooms from log lines. +# CLI flag: -bloom-compactor.ngram-skip +[bloom_ngram_skip: | default = 0] + +# Scalable Bloom Filter desired false-positive rate. +# CLI flag: -bloom-compactor.false-positive-rate +[bloom_false_positive_rate: | default = 0.01] + # Allow user to send structured metadata in push payload. 
# CLI flag: -validation.allow-structured-metadata [allow_structured_metadata: | default = false] diff --git a/pkg/bloomcompactor/TODO.md b/pkg/bloomcompactor/TODO.md index 479f5399a350d..2d963841b854c 100644 --- a/pkg/bloomcompactor/TODO.md +++ b/pkg/bloomcompactor/TODO.md @@ -1,4 +1,2 @@ -* Adding falsePosRate of sbf into config -* Add per-tenant bool to enable compaction * Use tarGz, untarGz before uploding blocks to storage * Introduce back `maxLookBackPeriod` as `RejectOldSamplesMaxAge` limit in distributors diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index ccc331412e3bb..5ffd9d7dc6a7c 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -63,7 +63,6 @@ import ( // TODO: Make a constants file somewhere const ( - fpRate = 0.01 bloomFileName = "bloom" seriesFileName = "series" ) @@ -351,7 +350,9 @@ func (c *Compactor) compactTenant(ctx context.Context, logger log.Logger, sc sto } // Tokenizer is not thread-safe so we need one per goroutine. - bt, _ := v1.NewBloomTokenizer(c.reg) + NGramLength := c.limits.BloomNGramLength(tenant) + NGramSkip := c.limits.BloomNGramSkip(tenant) + bt, _ := v1.NewBloomTokenizer(c.reg, NGramLength, NGramSkip) // TODO: Use ForEachConcurrent? errs := multierror.New() @@ -457,7 +458,14 @@ func makeChunkRefs(chksMetas []tsdbindex.ChunkMeta, tenant string, fp model.Fing } // TODO Revisit this step once v1/bloom lib updated to combine blooms in the same series -func buildBloomBlock(ctx context.Context, logger log.Logger, bloomForChks v1.SeriesWithBloom, job Job, workingDir string) (bloomshipper.Block, error) { +func buildBloomBlock( + ctx context.Context, + logger log.Logger, + options v1.BlockOptions, + bloomForChks v1.SeriesWithBloom, + job Job, + workingDir string, +) (bloomshipper.Block, error) { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). 
if err := ctx.Err(); err != nil { return bloomshipper.Block{}, err @@ -466,7 +474,7 @@ func buildBloomBlock(ctx context.Context, logger log.Logger, bloomForChks v1.Ser localDst := createLocalDirName(workingDir, job) // write bloom to a local dir - builder, err := v1.NewBlockBuilder(v1.NewBlockOptions(), v1.NewDirectoryBlockWriter(localDst)) + builder, err := v1.NewBlockBuilder(options, v1.NewDirectoryBlockWriter(localDst)) if err != nil { level.Error(logger).Log("creating builder", err) return bloomshipper.Block{}, err @@ -514,9 +522,16 @@ func createLocalDirName(workingDir string, job Job) string { } // Compacts given list of chunks, uploads them to storage and returns a list of bloomBlocks -func CompactNewChunks(ctx context.Context, logger log.Logger, job Job, - chunks []chunk.Chunk, bt *v1.BloomTokenizer, - bloomShipperClient bloomshipper.Client, dst string) ([]bloomshipper.Block, error) { +func CompactNewChunks( + ctx context.Context, + logger log.Logger, + job Job, + chunks []chunk.Chunk, + bt *v1.BloomTokenizer, + fpRate float64, + bloomShipperClient bloomshipper.Client, + dst string, +) ([]bloomshipper.Block, error) { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). 
if err := ctx.Err(); err != nil { return nil, err @@ -536,7 +551,8 @@ func CompactNewChunks(ctx context.Context, logger log.Logger, job Job, bt.PopulateSeriesWithBloom(&bloomForChks, chunks) // Build and upload bloomBlock to storage - blocks, err := buildBloomBlock(ctx, logger, bloomForChks, job, dst) + blockOptions := v1.NewBlockOptions(bt.GetNGramLength(), bt.GetNGramSkip()) + blocks, err := buildBloomBlock(ctx, logger, blockOptions, bloomForChks, job, dst) if err != nil { level.Error(logger).Log("building bloomBlocks", err) return nil, err @@ -579,7 +595,8 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, return err } - storedBlocks, err := CompactNewChunks(ctx, logger, job, chks, bt, bloomShipperClient, c.cfg.WorkingDirectory) + fpRate := c.limits.BloomFalsePositiveRate(job.Tenant()) + storedBlocks, err := CompactNewChunks(ctx, logger, job, chks, bt, fpRate, bloomShipperClient, c.cfg.WorkingDirectory) if err != nil { return level.Error(logger).Log("compacting new chunks", err) } diff --git a/pkg/bloomcompactor/config.go b/pkg/bloomcompactor/config.go index 57721850d2927..3bdf65d3e68aa 100644 --- a/pkg/bloomcompactor/config.go +++ b/pkg/bloomcompactor/config.go @@ -44,4 +44,7 @@ type Limits interface { BloomCompactorMaxTableAge(tenantID string) time.Duration BloomCompactorMinTableAge(tenantID string) time.Duration BloomCompactorEnabled(tenantID string) bool + BloomNGramLength(tenantID string) int + BloomNGramSkip(tenantID string) int + BloomFalsePositiveRate(tenantID string) float64 } diff --git a/pkg/bloomcompactor/sharding_test.go b/pkg/bloomcompactor/sharding_test.go index 1bd7b198648e1..69ef14bb9d272 100644 --- a/pkg/bloomcompactor/sharding_test.go +++ b/pkg/bloomcompactor/sharding_test.go @@ -13,7 +13,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" - "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/downloads" util_log "github.com/grafana/loki/pkg/util/log" 
lokiring "github.com/grafana/loki/pkg/util/ring" "github.com/grafana/loki/pkg/validation" @@ -44,7 +43,7 @@ func TestShuffleSharding(t *testing.T) { require.NoError(t, ringManager.StartAsync(context.Background())) sharding := NewShuffleShardingStrategy(ringManager.Ring, ringManager.RingLifecycler, mockLimits{ - Limits: overrides, + Overrides: overrides, bloomCompactorShardSize: shardSize, }) @@ -128,22 +127,10 @@ func TestShuffleSharding(t *testing.T) { } type mockLimits struct { - downloads.Limits + *validation.Overrides bloomCompactorShardSize int } func (m mockLimits) BloomCompactorShardSize(_ string) int { return m.bloomCompactorShardSize } - -func (m mockLimits) BloomCompactorMaxTableAge(_ string) time.Duration { - return 0 -} - -func (m mockLimits) BloomCompactorMinTableAge(_ string) time.Duration { - return 0 -} - -func (m mockLimits) BloomCompactorEnabled(_ string) bool { - return false -} diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index 93830cac8953d..1e0e15125bfb8 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -33,20 +33,18 @@ type BloomTokenizer struct { } const CacheSize = 150000 -const DefaultNGramLength = 4 -const DefaultNGramSkip = 0 // NewBloomTokenizer returns a new instance of the Bloom Tokenizer. // Warning: the tokens returned use the same byte slice to reduce allocations. This has two consequences: // 1) The token slices generated must not be mutated externally // 2) The token slice must not be used after the next call to `Tokens()` as it will repopulate the slice. // 2) This is not thread safe. 
-func NewBloomTokenizer(reg prometheus.Registerer) (*BloomTokenizer, error) { +func NewBloomTokenizer(reg prometheus.Registerer, NGramLength, NGramSkip int) (*BloomTokenizer, error) { t := &BloomTokenizer{ metrics: newMetrics(reg), } t.cache = make(map[string]interface{}, CacheSize) - t.lineTokenizer = NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) // default to 4-grams, no skip + t.lineTokenizer = NewNGramTokenizer(NGramLength, NGramSkip) level.Info(util_log.Logger).Log("bloom tokenizer created") @@ -57,6 +55,14 @@ func (bt *BloomTokenizer) SetLineTokenizer(t *NGramTokenizer) { bt.lineTokenizer = t } +func (bt *BloomTokenizer) GetNGramLength() uint64 { + return uint64(bt.lineTokenizer.N) +} + +func (bt *BloomTokenizer) GetNGramSkip() uint64 { + return uint64(bt.lineTokenizer.Skip) +} + // TODO: Something real here with metrics func newMetrics(_ prometheus.Registerer) *metrics { return &metrics{} diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index 050aa61a7e60e..904a257ef7679 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -2,6 +2,7 @@ package v1 import ( "fmt" + "testing" "time" "github.com/prometheus/prometheus/model/labels" @@ -11,8 +12,6 @@ import ( "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/pkg/storage/chunk" - "testing" - "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -21,6 +20,11 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +const ( + DefaultNGramLength = 4 + DefaultNGramSkip = 0 +) + var ( four = NewNGramTokenizer(4, 0) ) @@ -69,7 +73,7 @@ func TestPrefixedKeyCreation(t *testing.T) { } func TestSetLineTokenizer(t *testing.T) { - bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) + bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) // Validate defaults require.Equal(t, bt.lineTokenizer.N, DefaultNGramLength) @@ 
-83,7 +87,7 @@ func TestSetLineTokenizer(t *testing.T) { func TestPopulateSeriesWithBloom(t *testing.T) { var testLine = "this is a log line" - bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) + bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8) var lbsList []labels.Labels @@ -128,7 +132,7 @@ func TestPopulateSeriesWithBloom(t *testing.T) { } func BenchmarkMapClear(b *testing.B) { - bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) + bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) for i := 0; i < b.N; i++ { for k := 0; k < CacheSize; k++ { bt.cache[fmt.Sprint(k)] = k @@ -139,7 +143,7 @@ func BenchmarkMapClear(b *testing.B) { } func BenchmarkNewMap(b *testing.B) { - bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer) + bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) for i := 0; i < b.N; i++ { for k := 0; k < CacheSize; k++ { bt.cache[fmt.Sprint(k)] = k diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go index 7608c85245b07..34550940b7b1d 100644 --- a/pkg/storage/bloom/v1/builder.go +++ b/pkg/storage/bloom/v1/builder.go @@ -30,10 +30,12 @@ type BlockBuilder struct { blooms *BloomBlockBuilder } -func NewBlockOptions() BlockOptions { +func NewBlockOptions(NGramLength, NGramSkip uint64) BlockOptions { return BlockOptions{ schema: Schema{ - version: byte(1), + version: byte(1), + nGramLength: NGramLength, + nGramSkip: NGramSkip, }, SeriesPageSize: 100, BloomPageSize: 10 << 10, // 0.01MB diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go index 2a547cd479bf0..0ea6f6451ebac 100644 --- a/pkg/storage/bloom/v1/builder_test.go +++ b/pkg/storage/bloom/v1/builder_test.go @@ -87,12 +87,16 @@ func TestBlockBuilderRoundTrip(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { + schema := Schema{ + 
version: DefaultSchemaVersion, + encoding: chunkenc.EncSnappy, + nGramLength: 10, + nGramSkip: 2, + } + builder, err := NewBlockBuilder( BlockOptions{ - schema: Schema{ - version: DefaultSchemaVersion, - encoding: chunkenc.EncSnappy, - }, + schema: schema, SeriesPageSize: 100, BloomPageSize: 10 << 10, }, @@ -106,6 +110,10 @@ func TestBlockBuilderRoundTrip(t *testing.T) { block := NewBlock(tc.reader) querier := NewBlockQuerier(block) + err = block.LoadHeaders() + require.Nil(t, err) + require.Equal(t, block.blooms.schema, schema) + for i := 0; i < len(data); i++ { require.Equal(t, true, querier.Next(), "on iteration %d with error %v", i, querier.Err()) got := querier.At() diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go index 98e170b183e7c..cc168bc06a6d0 100644 --- a/pkg/storage/bloom/v1/index.go +++ b/pkg/storage/bloom/v1/index.go @@ -12,14 +12,15 @@ import ( ) type Schema struct { - version byte - encoding chunkenc.Encoding + version byte + encoding chunkenc.Encoding + nGramLength, nGramSkip uint64 } // byte length func (s Schema) Len() int { - // magic number + version + encoding - return 4 + 1 + 1 + // magic number + version + encoding + ngram length + ngram skip + return 4 + 1 + 1 + 8 + 8 } func (s *Schema) DecompressorPool() chunkenc.ReaderPool { @@ -35,6 +36,9 @@ func (s *Schema) Encode(enc *encoding.Encbuf) { enc.PutBE32(magicNumber) enc.PutByte(s.version) enc.PutByte(byte(s.encoding)) + enc.PutBE64(s.nGramLength) + enc.PutBE64(s.nGramSkip) + } func (s *Schema) DecodeFrom(r io.ReadSeeker) error { @@ -64,6 +68,9 @@ func (s *Schema) Decode(dec *encoding.Decbuf) error { return errors.Wrap(err, "parsing encoding") } + s.nGramLength = dec.Be64() + s.nGramSkip = dec.Be64() + return dec.Err() } diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 0a482b2c0401f..250fa9575c470 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -186,6 +186,9 @@ type Limits struct { BloomCompactorMaxTableAge time.Duration 
`yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"` BloomCompactorMinTableAge time.Duration `yaml:"bloom_compactor_min_table_age" json:"bloom_compactor_min_table_age"` BloomCompactorEnabled bool `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"` + BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length"` + BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip"` + BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate"` AllowStructuredMetadata bool `yaml:"allow_structured_metadata,omitempty" json:"allow_structured_metadata,omitempty" doc:"description=Allow user to send structured metadata in push payload."` MaxStructuredMetadataSize flagext.ByteSize `yaml:"max_structured_metadata_size" json:"max_structured_metadata_size" doc:"description=Maximum size accepted for structured metadata per log line."` @@ -303,6 +306,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&l.BloomCompactorMaxTableAge, "bloom-compactor.max-table-age", 7*24*time.Hour, "The maximum age of a table before it is compacted. Do not compact tables older than the the configured time. Default to 7 days. 0s means no limit.") f.DurationVar(&l.BloomCompactorMinTableAge, "bloom-compactor.min-table-age", 1*time.Hour, "The minimum age of a table before it is compacted. Do not compact tables newer than the the configured time. Default to 1 hour. 0s means no limit. 
This is useful to avoid compacting tables that will be updated with out-of-order writes.") f.BoolVar(&l.BloomCompactorEnabled, "bloom-compactor.enable-compaction", false, "Whether to compact chunks into bloom filters.") + f.IntVar(&l.BloomNGramLength, "bloom-compactor.ngram-length", 4, "Length of the n-grams created when computing blooms from log lines.") + f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 0, "Skip factor for the n-grams created when computing blooms from log lines.") + f.Float64Var(&l.BloomFalsePositiveRate, "bloom-compactor.false-positive-rate", 0.01, "Scalable Bloom Filter desired false-positive rate.") l.ShardStreams = &shardstreams.Config{} l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f) @@ -802,6 +808,18 @@ func (o *Overrides) BloomCompactorEnabled(userID string) bool { return o.getOverridesForUser(userID).BloomCompactorEnabled } +func (o *Overrides) BloomNGramLength(userID string) int { + return o.getOverridesForUser(userID).BloomNGramLength +} + +func (o *Overrides) BloomNGramSkip(userID string) int { + return o.getOverridesForUser(userID).BloomNGramSkip +} + +func (o *Overrides) BloomFalsePositiveRate(userID string) float64 { + return o.getOverridesForUser(userID).BloomFalsePositiveRate +} + func (o *Overrides) AllowStructuredMetadata(userID string) bool { return o.getOverridesForUser(userID).AllowStructuredMetadata } diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go index 36926bcd30343..5b997d903a373 100644 --- a/tools/tsdb/bloom-tester/lib.go +++ b/tools/tsdb/bloom-tester/lib.go @@ -36,6 +36,11 @@ import ( "github.com/grafana/loki/tools/tsdb/helpers" ) +const ( + DefaultNGramLength = 4 + DefaultNGramSkip = 0 +) + func execute() { conf, svc, bucket, err := helpers.Setup() helpers.ExitErr("setting up", err) @@ -259,9 +264,9 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, 
"total", numTesters) var n int // count iterated series - //pool := newPool(runtime.NumCPU()) - //pool := newPool(1) - bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer) + // pool := newPool(runtime.NumCPU()) + // pool := newPool(1) + bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) for _, tenant := range tenants { level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName) err := indexShipper.ForEach( diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go index 93b0ba75b6d15..6be3b767ec634 100644 --- a/tools/tsdb/bloom-tester/readlib.go +++ b/tools/tsdb/bloom-tester/readlib.go @@ -118,14 +118,14 @@ func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexSh } level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, "total", numTesters) - //var n int // count iterated series - //reportEvery := 10 // report every n chunks - //pool := newPool(runtime.NumCPU()) - //pool := newPool(16) - //searchString := os.Getenv("SEARCH_STRING") - //147854,148226,145541,145603,147159,147836,145551,145599,147393,147841,145265,145620,146181,147225,147167,146131,146189,146739,147510,145572,146710,148031,29,146205,147175,146984,147345 - //mytenants := []string{"29"} - bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer) + // var n int // count iterated series + // reportEvery := 10 // report every n chunks + // pool := newPool(runtime.NumCPU()) + // pool := newPool(16) + // searchString := os.Getenv("SEARCH_STRING") + // 147854,148226,145541,145603,147159,147836,145551,145599,147393,147841,145265,145620,146181,147225,147167,146131,146189,146739,147510,145572,146710,148031,29,146205,147175,146984,147345 + // mytenants := []string{"29"} + bloomTokenizer, _ := bt.NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) for _, tenant := range tenants { 
level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName) err := shipper.ForEach( From 6b944f7c79281d453f1e6609c76a6f905689d00e Mon Sep 17 00:00:00 2001 From: Periklis Tsirakidis Date: Thu, 23 Nov 2023 15:40:33 +0100 Subject: [PATCH 42/48] operator: Fix custom CA for object-store in ruler component (#11288) Co-authored-by: Robert Jacob --- operator/CHANGELOG.md | 1 + operator/internal/manifests/ruler.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 5728306b7f9f9..8b415c1229f7e 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,6 @@ ## Main +- [11288](https://github.com/grafana/loki/pull/11288) **periklis**: Fix custom CA for object-store in ruler component - [11091](https://github.com/grafana/loki/pull/11091) **periklis**: Add automatic stream sharding support - [11022](https://github.com/grafana/loki/pull/11022) **JoaoBraveCoding**: Remove outdated BoltDB dashboards - [10932](https://github.com/grafana/loki/pull/10932) **JoaoBraveCoding**: Adds new value v13 to schema diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go index 902160486575a..4a432f7cc4339 100644 --- a/operator/internal/manifests/ruler.go +++ b/operator/internal/manifests/ruler.go @@ -17,6 +17,7 @@ import ( lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/manifests/internal/config" "github.com/grafana/loki/operator/internal/manifests/openshift" + "github.com/grafana/loki/operator/internal/manifests/storage" ) // BuildRuler returns a list of k8s objects for Loki Stack Ruler @@ -28,6 +29,10 @@ func BuildRuler(opts Options) ([]client.Object, error) { } } + if err := storage.ConfigureStatefulSet(statefulSet, opts.ObjectStorage); err != nil { + return nil, err + } + if opts.Gates.GRPCEncryption { if err := configureRulerGRPCServicePKI(statefulSet, opts); err != nil { return nil, err From 
c8093e1ee38983c70fd18eac6816c8aa5d12aa5a Mon Sep 17 00:00:00 2001 From: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com> Date: Fri, 24 Nov 2023 09:40:59 +0200 Subject: [PATCH 43/48] Revert "Compression of bloom blocks (#11267)" (#11307) This reverts commit af177034 (https://github.com/grafana/loki/pull/11267) because the compression/uncompression must be done outside of the client context: https://raintank-corp.slack.com/archives/C056KUQFBFT/p1700494151990649 Signed-off-by: Vladyslav Diachenko --- pkg/bloomcompactor/bloomcompactor.go | 8 +- pkg/storage/bloom/v1/archive.go | 42 +---------- pkg/storage/bloom/v1/block_writer.go | 8 +- pkg/storage/bloom/v1/reader.go | 4 +- .../stores/shipper/bloomshipper/client.go | 75 +++---------------- .../shipper/bloomshipper/client_test.go | 71 +++--------------- .../stores/shipper/bloomshipper/shipper.go | 5 +- .../shipper/bloomshipper/shipper_test.go | 5 +- 8 files changed, 32 insertions(+), 186 deletions(-) diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index 5ffd9d7dc6a7c..9eb7ed11ca5db 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -491,11 +491,6 @@ func buildBloomBlock( level.Error(logger).Log("reading bloomBlock", err) } - indexFile, err := os.Open(filepath.Join(localDst, seriesFileName)) - if err != nil { - level.Error(logger).Log("reading bloomBlock", err) - } - blocks := bloomshipper.Block{ BlockRef: bloomshipper.BlockRef{ Ref: bloomshipper.Ref{ @@ -509,8 +504,7 @@ func buildBloomBlock( }, IndexPath: job.IndexPath(), }, - BloomData: blockFile, - IndexData: indexFile, + Data: blockFile, } return blocks, nil diff --git a/pkg/storage/bloom/v1/archive.go b/pkg/storage/bloom/v1/archive.go index 7f252e3bde03e..4c0b124a05cf4 100644 --- a/pkg/storage/bloom/v1/archive.go +++ b/pkg/storage/bloom/v1/archive.go @@ -5,46 +5,12 @@ import ( "io" "os" "path/filepath" - "strings" "github.com/pkg/errors" 
"github.com/grafana/loki/pkg/chunkenc" ) -func TarGzMemory(dst io.Writer, src *ByteReader) error { - gzipper := chunkenc.GetWriterPool(chunkenc.EncGZIP).GetWriter(dst) - defer gzipper.Close() - - tarballer := tar.NewWriter(gzipper) - defer tarballer.Close() - - header := &tar.Header{ - Name: SeriesFileName, - Size: int64(src.index.Len()), - } - // Write the header - if err := tarballer.WriteHeader(header); err != nil { - return errors.Wrapf(err, "error writing tar header for index file") - } - // Write the file contents - if _, err := tarballer.Write(src.index.Bytes()); err != nil { - return errors.Wrapf(err, "error writing file contents for index file") - } - - header = &tar.Header{ - Name: BloomFileName, - Size: int64(src.blooms.Len()), - } - if err := tarballer.WriteHeader(header); err != nil { - return errors.Wrapf(err, "error writing tar header for bloom file") - } - if _, err := tarballer.Write(src.blooms.Bytes()); err != nil { - return errors.Wrapf(err, "error writing file contents for bloom file") - } - return nil -} - func TarGz(dst io.Writer, src *DirectoryBlockReader) error { if err := src.Init(); err != nil { return errors.Wrap(err, "error initializing directory block reader") @@ -111,13 +77,7 @@ func UnTarGz(dst string, r io.Reader) error { // if it's a file create it case tar.TypeReg: - err := os.MkdirAll(target[:strings.LastIndex(target, "/")], 0755) - if err != nil { - return errors.Wrapf(err, "error creating directory %s", target) - } - // TODO: We need to settle on how best to handle file permissions and ownership - // This may be utilizing a zip file instead of tar.gz - f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0755) + f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode)) if err != nil { return errors.Wrapf(err, "error creating file %s", target) } diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go index 99ab65ef9cd40..317d1e598414a 100644 --- 
a/pkg/storage/bloom/v1/block_writer.go +++ b/pkg/storage/bloom/v1/block_writer.go @@ -12,8 +12,8 @@ import ( ) const ( - BloomFileName = "bloom" - SeriesFileName = "series" + bloomFileName = "bloom" + seriesFileName = "series" ) type BlockWriter interface { @@ -66,12 +66,12 @@ func (b *DirectoryBlockWriter) Init() error { return errors.Wrap(err, "creating bloom block dir") } - b.index, err = os.Create(filepath.Join(b.dir, SeriesFileName)) + b.index, err = os.Create(filepath.Join(b.dir, seriesFileName)) if err != nil { return errors.Wrap(err, "creating series file") } - b.blooms, err = os.Create(filepath.Join(b.dir, BloomFileName)) + b.blooms, err = os.Create(filepath.Join(b.dir, bloomFileName)) if err != nil { return errors.Wrap(err, "creating bloom file") } diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go index d5c70a2b64d83..e4de9609b9082 100644 --- a/pkg/storage/bloom/v1/reader.go +++ b/pkg/storage/bloom/v1/reader.go @@ -49,12 +49,12 @@ func NewDirectoryBlockReader(dir string) *DirectoryBlockReader { func (r *DirectoryBlockReader) Init() error { if !r.initialized { var err error - r.index, err = os.Open(filepath.Join(r.dir, SeriesFileName)) + r.index, err = os.Open(filepath.Join(r.dir, seriesFileName)) if err != nil { return errors.Wrap(err, "opening series file") } - r.blooms, err = os.Open(filepath.Join(r.dir, BloomFileName)) + r.blooms, err = os.Open(filepath.Join(r.dir, bloomFileName)) if err != nil { return errors.Wrap(err, "opening bloom file") } diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go index 76cc4c2bfde9f..a68959e1d908e 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client.go +++ b/pkg/storage/stores/shipper/bloomshipper/client.go @@ -1,20 +1,16 @@ package bloomshipper import ( - "bufio" "bytes" "context" "encoding/json" "fmt" "io" - "os" "path/filepath" "strconv" "strings" "time" - v1 "github.com/grafana/loki/pkg/storage/bloom/v1" - 
"github.com/prometheus/common/model" "github.com/grafana/dskit/concurrency" @@ -79,8 +75,7 @@ type MetaClient interface { type Block struct { BlockRef - IndexData io.ReadCloser - BloomData io.ReadCloser + Data io.ReadCloser } type BlockClient interface { @@ -210,35 +205,13 @@ func (b *BloomClient) GetBlocks(ctx context.Context, references []BlockRef) (cha return fmt.Errorf("error while period lookup: %w", err) } objectClient := b.periodicObjectClients[period] - compressedObjectReadCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref)) + readCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref)) if err != nil { return fmt.Errorf("error while fetching object from storage: %w", err) } - defer func() { - compressedObjectReadCloser.Close() - }() - - workingDirectoryPath := filepath.Join(b.storageConfig.BloomShipperConfig.WorkingDirectory, reference.BlockPath, strconv.FormatInt(time.Now().UTC().UnixMilli(), 10)) - err = v1.UnTarGz(workingDirectoryPath, compressedObjectReadCloser) - if err != nil { - return fmt.Errorf("error while untarring: %w", err) - } - - indexFile, err := os.Open(filepath.Join(workingDirectoryPath, v1.SeriesFileName)) - if err != nil { - return fmt.Errorf("error while opening index file: %w", err) - } - indexReader := bufio.NewReader(indexFile) - - bloomFile, err := os.Open(filepath.Join(workingDirectoryPath, v1.BloomFileName)) - if err != nil { - return fmt.Errorf("error while opening bloom file: %w", err) - } - bloomReader := bufio.NewReader(bloomFile) blocksChannel <- Block{ - BlockRef: reference, - BloomData: io.NopCloser(bloomReader), - IndexData: io.NopCloser(indexReader), + BlockRef: reference, + Data: readCloser, } return nil }) @@ -252,25 +225,7 @@ func (b *BloomClient) GetBlocks(ctx context.Context, references []BlockRef) (cha return blocksChannel, errChannel } -func readCloserToBuffer(rc io.ReadCloser) *bytes.Buffer { - defer rc.Close() - - // Read the data from io.ReadCloser - data, 
err := io.ReadAll(rc) - if err != nil { - return nil - } - - // Write the data into a bytes.Buffer - var buf bytes.Buffer - _, err = buf.Write(data) - if err != nil { - return nil - } - - return &buf -} - +// TODO zip (archive) blocks before uploading to storage func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, error) { results := make([]Block, len(blocks)) //todo move concurrency to the config @@ -278,11 +233,7 @@ func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, e block := blocks[idx] defer func(Data io.ReadCloser) { _ = Data.Close() - }(block.BloomData) - - defer func(Data io.ReadCloser) { - _ = Data.Close() - }(block.IndexData) + }(block.Data) period, err := findPeriod(b.periodicConfigs, block.StartTimestamp) if err != nil { @@ -290,19 +241,11 @@ func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, e } key := createBlockObjectKey(block.Ref) objectClient := b.periodicObjectClients[period] - byteReader := v1.NewByteReader(readCloserToBuffer(block.IndexData), readCloserToBuffer(block.BloomData)) - - // TODO: Right now, this is asymetrical with the GetBlocks path. We have all the pieces - // in memory now, so it doesn't necessarily make sense to write the files to disk. That may change - // as we finalize on an archive format, and we may want to just house the downloaded files in memory instead. 
- // Create a buffer to write data - buf := new(bytes.Buffer) - err = v1.TarGzMemory(buf, byteReader) + data, err := io.ReadAll(block.Data) if err != nil { - return fmt.Errorf("error while tarring object data: %w", err) + return fmt.Errorf("error while reading object data: %w", err) } - - err = objectClient.PutObject(ctx, key, bytes.NewReader(buf.Bytes())) + err = objectClient.PutObject(ctx, key, bytes.NewReader(data)) if err != nil { return fmt.Errorf("error updloading block file: %w", err) } diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go index 6031e8bb06df0..4c4b6f855a8ec 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go @@ -1,9 +1,7 @@ package bloomshipper import ( - "archive/tar" "bytes" - "compress/gzip" "context" "encoding/json" "fmt" @@ -15,8 +13,6 @@ import ( "testing" "time" - v1 "github.com/grafana/loki/pkg/storage/bloom/v1" - aws_io "github.com/aws/smithy-go/io" "github.com/google/uuid" "github.com/prometheus/common/model" @@ -187,8 +183,6 @@ func Test_BloomClient_GetBlocks(t *testing.T) { secondBlockData := createBlockFile(t, secondBlockFullPath) require.FileExists(t, firstBlockFullPath) require.FileExists(t, secondBlockFullPath) - rootDir := filepath.Join(fsNamedStores["folder-1"].Directory, "bloom") - defer os.RemoveAll(rootDir) firstBlockRef := BlockRef{ Ref: Ref{ @@ -218,7 +212,7 @@ func Test_BloomClient_GetBlocks(t *testing.T) { blocksToDownload := []BlockRef{firstBlockRef, secondBlockRef} blocksCh, errorsCh := shipper.GetBlocks(context.Background(), blocksToDownload) - blocks := make(map[string][]byte) + blocks := make(map[string]string) func() { timout := time.After(5 * time.Second) for { @@ -232,14 +226,13 @@ func Test_BloomClient_GetBlocks(t *testing.T) { if !ok { return } - blockData, err := io.ReadAll(block.BloomData) + blockData, err := io.ReadAll(block.Data) require.NoError(t, err) - 
blocks[block.BlockRef.BlockPath] = blockData + blocks[block.BlockRef.BlockPath] = string(blockData) } } }() - defer os.RemoveAll("./bloom") firstBlockActualData, exists := blocks[firstBlockRef.BlockPath] require.Truef(t, exists, "data for the first block must be present in the results: %+v", blocks) @@ -252,42 +245,9 @@ func Test_BloomClient_GetBlocks(t *testing.T) { require.Len(t, blocks, 2) } -func extractFileFromTGZ(tarGzData []byte, targetFileName string) []byte { - gzReader, err := gzip.NewReader(bytes.NewReader(tarGzData)) - if err != nil { - return nil - } - defer gzReader.Close() - - tarReader := tar.NewReader(gzReader) - - for { - header, err := tarReader.Next() - - if err == io.EOF { - break - } - - if err != nil { - return nil - } - - if header.Name == targetFileName { - buffer := new(bytes.Buffer) - if _, err := io.Copy(buffer, tarReader); err != nil { - return nil - } - return buffer.Bytes() - } - } - - return nil -} - func Test_BloomClient_PutBlocks(t *testing.T) { shipper := createShipper(t) blockForFirstFolderData := "data1" - indexForFirstFolderData := "index1" blockForFirstFolder := Block{ BlockRef: BlockRef{ Ref: Ref{ @@ -301,12 +261,10 @@ func Test_BloomClient_PutBlocks(t *testing.T) { }, IndexPath: uuid.New().String(), }, - BloomData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForFirstFolderData))}, - IndexData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(indexForFirstFolderData))}, + Data: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForFirstFolderData))}, } blockForSecondFolderData := "data2" - indexForSecondFolderData := "index2" blockForSecondFolder := Block{ BlockRef: BlockRef{ Ref: Ref{ @@ -320,8 +278,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { }, IndexPath: uuid.New().String(), }, - BloomData: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForSecondFolderData))}, - IndexData: aws_io.ReadSeekNopCloser{ReadSeeker: 
bytes.NewReader([]byte(indexForSecondFolderData))}, + Data: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForSecondFolderData))}, } results, err := shipper.PutBlocks(context.Background(), []Block{blockForFirstFolder, blockForSecondFolder}) @@ -343,7 +300,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { require.FileExists(t, savedFilePath) savedData, err := os.ReadFile(savedFilePath) require.NoError(t, err) - require.Equal(t, blockForFirstFolderData, string(extractFileFromTGZ(savedData, "bloom"))) + require.Equal(t, blockForFirstFolderData, string(savedData)) secondResultBlock := results[1] path = secondResultBlock.BlockPath @@ -362,7 +319,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { require.FileExists(t, savedFilePath) savedData, err = os.ReadFile(savedFilePath) require.NoError(t, err) - require.Equal(t, blockForSecondFolderData, string(extractFileFromTGZ(savedData, "bloom"))) + require.Equal(t, blockForSecondFolderData, string(savedData)) } func Test_BloomClient_DeleteBlocks(t *testing.T) { @@ -407,19 +364,13 @@ func Test_BloomClient_DeleteBlocks(t *testing.T) { require.NoFileExists(t, block2Path) } -func createBlockFile(t *testing.T, path string) []byte { +func createBlockFile(t *testing.T, path string) string { err := os.MkdirAll(path[:strings.LastIndex(path, "/")], 0755) require.NoError(t, err) - bloomContent := []byte(uuid.NewString()) - indexContent := []byte(uuid.NewString()) - outputFile, err := os.Create(path) - require.NoError(t, err) - byteReader := v1.NewByteReader(bytes.NewBuffer(indexContent), bytes.NewBuffer(bloomContent)) - err = v1.TarGzMemory(outputFile, byteReader) - require.NoError(t, err) - err = outputFile.Close() + fileContent := uuid.NewString() + err = os.WriteFile(path, []byte(fileContent), 0700) require.NoError(t, err) - return bloomContent + return fileContent } func Test_TablesByPeriod(t *testing.T) { diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go 
b/pkg/storage/stores/shipper/bloomshipper/shipper.go index 0272a8e4f736a..2df1f41cd4a25 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go @@ -207,8 +207,7 @@ func (s *Shipper) createBlockQuerier(directory string) *v1.BlockQuerier { } func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, error) { - defer block.BloomData.Close() - defer block.IndexData.Close() + defer block.Data.Close() archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, delimiter)+1:]) archiveFile, err := os.Create(archivePath) @@ -216,7 +215,7 @@ func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, err return "", fmt.Errorf("error creating empty file to store the archiver: %w", err) } defer archiveFile.Close() - _, err = io.Copy(archiveFile, block.BloomData) + _, err = io.Copy(archiveFile, block.Data) if err != nil { return "", fmt.Errorf("error writing data to archive file: %w", err) } diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go index 2f662b2b793d5..45450c0e3838b 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go @@ -245,9 +245,8 @@ func Test_Shipper_extractBlock(t *testing.T) { shipper := Shipper{config: config.Config{WorkingDirectory: workingDir}} ts := time.Now().UTC() block := Block{ - BlockRef: BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"}, - BloomData: blockFile, - IndexData: seriesFile, + BlockRef: BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"}, + Data: blockFile, } actualPath, err := shipper.extractBlock(&block, ts) From 10fe48b815cb2b7ea64311f07b0ab02bdd7dacc6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 24 Nov 2023 
10:10:29 +0100 Subject: [PATCH 44/48] chore(deps): update alpine docker tag to v3.18.4 (main) (#11167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate logo banner](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | alpine | final | patch | `3.18.3` -> `3.18.4` | | alpine | final | minor | `3.16.7` -> `3.18.4` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki). 
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- clients/cmd/docker-driver/Dockerfile | 2 +- clients/cmd/promtail/Dockerfile.debug | 2 +- cmd/logcli/Dockerfile | 2 +- cmd/logql-analyzer/Dockerfile | 2 +- cmd/loki-canary-boringcrypto/Dockerfile | 2 +- cmd/loki-canary/Dockerfile | 2 +- cmd/loki-canary/Dockerfile.cross | 2 +- cmd/loki/Dockerfile | 2 +- cmd/loki/Dockerfile.cross | 2 +- cmd/loki/Dockerfile.debug | 2 +- cmd/migrate/Dockerfile | 2 +- cmd/querytee/Dockerfile | 2 +- cmd/querytee/Dockerfile.cross | 2 +- production/helm/loki/src/helm-test/Dockerfile | 2 +- tools/dev/loki-boltdb-storage-s3/dev.dockerfile | 2 +- tools/lambda-promtail/Dockerfile | 2 +- tools/tsdb/bloom-tester/Dockerfile | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile index d83b36b8f9052..5f81dedbb5bf4 100644 --- a/clients/cmd/docker-driver/Dockerfile +++ b/clients/cmd/docker-driver/Dockerfile @@ -9,7 +9,7 @@ COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false clients/cmd/docker-driver/docker-driver -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates tzdata COPY --from=build /src/loki/clients/cmd/docker-driver/docker-driver /bin/docker-driver WORKDIR /bin/ diff --git a/clients/cmd/promtail/Dockerfile.debug b/clients/cmd/promtail/Dockerfile.debug index 85a6396ac4c0a..1ff864251982e 100644 --- a/clients/cmd/promtail/Dockerfile.debug +++ b/clients/cmd/promtail/Dockerfile.debug @@ -9,7 +9,7 @@ WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false PROMTAIL_JOURNAL_ENABLED=true promtail-debug -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates tzdata COPY --from=build /src/loki/clients/cmd/promtail/promtail-debug /usr/bin/promtail-debug COPY --from=build /usr/bin/dlv /usr/bin/dlv diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile index e59c2c986945d..c273d8cc70e44 100644 --- a/cmd/logcli/Dockerfile +++ b/cmd/logcli/Dockerfile @@ -4,7 +4,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false logcli -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --no-cache ca-certificates diff --git a/cmd/logql-analyzer/Dockerfile b/cmd/logql-analyzer/Dockerfile index 2f1d619e0f30a..d434281ce411c 100644 --- a/cmd/logql-analyzer/Dockerfile +++ b/cmd/logql-analyzer/Dockerfile @@ -4,7 +4,7 @@ COPY . 
/src/loki WORKDIR /src/loki RUN make clean && CGO_ENABLED=0 go build ./cmd/logql-analyzer/ -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --no-cache ca-certificates diff --git a/cmd/loki-canary-boringcrypto/Dockerfile b/cmd/loki-canary-boringcrypto/Dockerfile index 45fb9a20e6a41..e6793f2a6d0e4 100644 --- a/cmd/loki-canary-boringcrypto/Dockerfile +++ b/cmd/loki-canary-boringcrypto/Dockerfile @@ -5,7 +5,7 @@ WORKDIR /src/loki RUN go env GOARCH > /goarch RUN make clean && make GOARCH=$(cat /goarch) BUILD_IN_CONTAINER=true GOEXPERIMENT=boringcrypto loki-canary-boringcrypto -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates RUN apk add --no-cache libc6-compat COPY --from=build /src/loki/cmd/loki-canary-boringcrypto/loki-canary-boringcrypto /usr/bin/loki-canary diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile index e3443483026d8..017bf6083df94 100644 --- a/cmd/loki-canary/Dockerfile +++ b/cmd/loki-canary/Dockerfile @@ -4,7 +4,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-canary -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/loki-canary/loki-canary /usr/bin/loki-canary ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index 34fb12a4328b2..6815f45dcbf10 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -12,7 +12,7 @@ COPY . 
/src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-canary -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/loki-canary/loki-canary /usr/bin/loki-canary ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile index a398eba1682f3..520600e759824 100644 --- a/cmd/loki/Dockerfile +++ b/cmd/loki/Dockerfile @@ -4,7 +4,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --no-cache ca-certificates libcap diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross index c6ee2865f61db..134683f615bb8 100644 --- a/cmd/loki/Dockerfile.cross +++ b/cmd/loki/Dockerfile.cross @@ -12,7 +12,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --no-cache ca-certificates diff --git a/cmd/loki/Dockerfile.debug b/cmd/loki/Dockerfile.debug index dc022e3499826..539dfdf90b26e 100644 --- a/cmd/loki/Dockerfile.debug +++ b/cmd/loki/Dockerfile.debug @@ -15,7 +15,7 @@ WORKDIR /src/loki RUN make clean && \ GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-debug -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/loki/loki-debug /usr/bin/loki-debug COPY --from=goenv /go/bin/dlv /usr/bin/dlv diff --git a/cmd/migrate/Dockerfile b/cmd/migrate/Dockerfile index 32c5ed06d046a..3fe4bbdc7a4ea 100644 --- a/cmd/migrate/Dockerfile +++ b/cmd/migrate/Dockerfile @@ -3,7 +3,7 @@ COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false migrate -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/migrate/migrate /usr/bin/migrate #ENTRYPOINT [ "/usr/bin/migrate" ] diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile index 8198b732b012b..a750c0efeb98a 100644 --- a/cmd/querytee/Dockerfile +++ b/cmd/querytee/Dockerfile @@ -4,7 +4,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-querytee -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/querytee/querytee /usr/bin/querytee ENTRYPOINT [ "/usr/bin/querytee" ] diff --git a/cmd/querytee/Dockerfile.cross b/cmd/querytee/Dockerfile.cross index 6f886e8d6d096..f759f5403f472 100644 --- a/cmd/querytee/Dockerfile.cross +++ b/cmd/querytee/Dockerfile.cross @@ -12,7 +12,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-querytee -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/querytee/querytee /usr/bin/querytee ENTRYPOINT [ "/usr/bin/querytee" ] diff --git a/production/helm/loki/src/helm-test/Dockerfile b/production/helm/loki/src/helm-test/Dockerfile index 253a10fd44bf8..012e48b84a38f 100644 --- a/production/helm/loki/src/helm-test/Dockerfile +++ b/production/helm/loki/src/helm-test/Dockerfile @@ -7,7 +7,7 @@ COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false helm-test -FROM alpine:3.16.7 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates=20230506-r0 COPY --from=build /src/loki/production/helm/loki/src/helm-test/helm-test /usr/bin/helm-test ENTRYPOINT [ "/usr/bin/helm-test" ] diff --git a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile index 3b8912b4120ab..4a2a420fd0938 100644 --- a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile +++ b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile @@ -2,7 +2,7 @@ FROM golang:1.20.4 ENV CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.21.1 -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN mkdir /loki WORKDIR /loki diff --git a/tools/lambda-promtail/Dockerfile b/tools/lambda-promtail/Dockerfile index 8e94327990996..bac1cdf258f2a 100644 --- a/tools/lambda-promtail/Dockerfile +++ b/tools/lambda-promtail/Dockerfile @@ -12,7 +12,7 @@ RUN go mod download RUN go build -o ./main -tags lambda.norpc -ldflags="-s -w" lambda-promtail/*.go -FROM alpine:3.18.3 +FROM alpine:3.18.4 WORKDIR /app diff --git a/tools/tsdb/bloom-tester/Dockerfile b/tools/tsdb/bloom-tester/Dockerfile index d471e5d907005..d5f45d54da355 100644 --- a/tools/tsdb/bloom-tester/Dockerfile +++ b/tools/tsdb/bloom-tester/Dockerfile @@ -6,7 +6,7 @@ WORKDIR /src/bloom-tester RUN make bloom-tester -FROM alpine:3.18.3 +FROM alpine:3.18.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/bloom-tester/tools/tsdb/bloom-tester/bloom-tester /usr/bin/bloom-tester ENTRYPOINT [ "/usr/bin/bloom-tester", "--config.file=/etc/loki/config.yaml" ] From 75cfe5959694d69841a632f39a8f078e9ad3c2e6 Mon Sep 17 00:00:00 2001 From: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com> Date: Fri, 24 Nov 2023 12:27:37 +0200 Subject: [PATCH 45/48] bloom blocks downloading queue (#11201) implemented bloom blocks downloading queue to control the concurrency of downloading the 
blocks from the storage Signed-off-by: Vladyslav Diachenko --- docs/sources/configure/_index.md | 14 ++ pkg/bloomgateway/bloomgateway.go | 4 +- pkg/bloomgateway/bloomgateway_test.go | 26 +- pkg/bloomgateway/sharding.go | 1 + pkg/loki/modules.go | 2 +- pkg/storage/bloom/v1/block_writer.go | 8 +- pkg/storage/bloom/v1/reader.go | 4 +- .../shipper/bloomshipper/block_downloader.go | 230 ++++++++++++++++++ .../bloomshipper/block_downloader_test.go | 168 +++++++++++++ .../stores/shipper/bloomshipper/client.go | 51 ++-- .../shipper/bloomshipper/client_test.go | 81 +++--- .../shipper/bloomshipper/config/config.go | 14 +- .../stores/shipper/bloomshipper/shipper.go | 100 ++------ .../shipper/bloomshipper/shipper_test.go | 68 ------ pkg/validation/limits.go | 20 +- 15 files changed, 541 insertions(+), 250 deletions(-) create mode 100644 pkg/storage/stores/shipper/bloomshipper/block_downloader.go create mode 100644 pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index 5c3dba3d15de2..446a48adcd979 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -2248,6 +2248,16 @@ bloom_shipper: # Working directory to store downloaded Bloom Blocks. # CLI flag: -bloom.shipper.working-directory [working_directory: | default = "bloom-shipper"] + + blocks_downloading_queue: + # The count of parallel workers that download Bloom Blocks. + # CLI flag: -bloom.shipper.blocks-downloading-queue.workers-count + [workers_count: | default = 100] + + # Maximum number of task in queue per tenant per bloom-gateway. Enqueuing + # the tasks above this limit will fail an error. 
+ # CLI flag: -bloom.shipper.blocks-downloading-queue.max_tasks_enqueued_per_tenant + [max_tasks_enqueued_per_tenant: | default = 10000] ``` ### chunk_store_config @@ -2990,6 +3000,10 @@ shard_streams: # CLI flag: -bloom-compactor.false-positive-rate [bloom_false_positive_rate: | default = 0.01] +# Maximum number of blocks will be downloaded in parallel by the Bloom Gateway. +# CLI flag: -bloom-gateway.blocks-downloading-parallelism +[bloom_gateway_blocks_downloading_parallelism: | default = 50] + # Allow user to send structured metadata in push payload. # CLI flag: -validation.allow-structured-metadata [allow_structured_metadata: | default = false] diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index b0b01c34dbaf6..425d6713e92f9 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -187,7 +187,7 @@ type Gateway struct { } // New returns a new instance of the Bloom Gateway. -func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, shardingStrategy ShardingStrategy, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) { +func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, overrides Limits, shardingStrategy ShardingStrategy, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) { g := &Gateway{ cfg: cfg, logger: logger, @@ -205,7 +205,7 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, s return nil, err } - bloomShipper, err := bloomshipper.NewShipper(client, storageCfg.BloomShipperConfig, logger) + bloomShipper, err := bloomshipper.NewShipper(client, storageCfg.BloomShipperConfig, overrides, logger, reg) if err != nil { return nil, err } diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go index c0d9ffdfae230..0b6a207362ac6 100644 --- a/pkg/bloomgateway/bloomgateway_test.go +++ 
b/pkg/bloomgateway/bloomgateway_test.go @@ -82,7 +82,7 @@ func TestBloomGateway_StartStopService(t *testing.T) { }, } - gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) + gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) require.NoError(t, err) err = services.StartAndAwaitRunning(context.Background(), gw) @@ -142,7 +142,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { t.Run("returns unfiltered chunk refs if no filters provided", func(t *testing.T) { reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) + gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) require.NoError(t, err) err = services.StartAndAwaitRunning(context.Background(), gw) @@ -188,7 +188,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { t.Run("returns error if chunk refs do not belong to tenant", func(t *testing.T) { reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) + gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) require.NoError(t, err) ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00") @@ -212,7 +212,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { t.Run("gateway tracks active users", func(t *testing.T) { reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, ss, cm, logger, reg) + gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) require.NoError(t, err) err = services.StartAndAwaitRunning(context.Background(), gw) @@ -248,3 +248,21 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { require.ElementsMatch(t, tenants, gw.activeUsers.ActiveUsers()) }) } + +type fakeLimits struct { +} + +func (f fakeLimits) BloomGatewayShardSize(_ string) int { + //TODO implement me + panic("implement me") +} + +func (f fakeLimits) BloomGatewayEnabled(_ string) bool { + //TODO implement me + panic("implement me") +} + +func (f 
fakeLimits) BloomGatewayBlocksDownloadingParallelism(_ string) int { + //TODO implement me + panic("implement me") +} diff --git a/pkg/bloomgateway/sharding.go b/pkg/bloomgateway/sharding.go index 4bd288ccfe43b..09926284b3794 100644 --- a/pkg/bloomgateway/sharding.go +++ b/pkg/bloomgateway/sharding.go @@ -38,6 +38,7 @@ var ( type Limits interface { BloomGatewayShardSize(tenantID string) int BloomGatewayEnabled(tenantID string) bool + BloomGatewayBlocksDownloadingParallelism(userID string) int } type ShardingStrategy interface { diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index e0e8ab4d1f88d..bf450f852be5a 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1263,7 +1263,7 @@ func (t *Loki) initBloomGateway() (services.Service, error) { shuffleSharding := bloomgateway.NewShuffleShardingStrategy(t.bloomGatewayRingManager.Ring, t.bloomGatewayRingManager.RingLifecycler, t.Overrides, logger) - gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.Cfg.SchemaConfig, t.Cfg.StorageConfig, shuffleSharding, t.clientMetrics, logger, prometheus.DefaultRegisterer) + gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.Cfg.SchemaConfig, t.Cfg.StorageConfig, t.Overrides, shuffleSharding, t.clientMetrics, logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go index 317d1e598414a..99ab65ef9cd40 100644 --- a/pkg/storage/bloom/v1/block_writer.go +++ b/pkg/storage/bloom/v1/block_writer.go @@ -12,8 +12,8 @@ import ( ) const ( - bloomFileName = "bloom" - seriesFileName = "series" + BloomFileName = "bloom" + SeriesFileName = "series" ) type BlockWriter interface { @@ -66,12 +66,12 @@ func (b *DirectoryBlockWriter) Init() error { return errors.Wrap(err, "creating bloom block dir") } - b.index, err = os.Create(filepath.Join(b.dir, seriesFileName)) + b.index, err = os.Create(filepath.Join(b.dir, SeriesFileName)) if err != nil { return errors.Wrap(err, "creating 
series file") } - b.blooms, err = os.Create(filepath.Join(b.dir, bloomFileName)) + b.blooms, err = os.Create(filepath.Join(b.dir, BloomFileName)) if err != nil { return errors.Wrap(err, "creating bloom file") } diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go index e4de9609b9082..d5c70a2b64d83 100644 --- a/pkg/storage/bloom/v1/reader.go +++ b/pkg/storage/bloom/v1/reader.go @@ -49,12 +49,12 @@ func NewDirectoryBlockReader(dir string) *DirectoryBlockReader { func (r *DirectoryBlockReader) Init() error { if !r.initialized { var err error - r.index, err = os.Open(filepath.Join(r.dir, seriesFileName)) + r.index, err = os.Open(filepath.Join(r.dir, SeriesFileName)) if err != nil { return errors.Wrap(err, "opening series file") } - r.blooms, err = os.Open(filepath.Join(r.dir, bloomFileName)) + r.blooms, err = os.Open(filepath.Join(r.dir, BloomFileName)) if err != nil { return errors.Wrap(err, "opening bloom file") } diff --git a/pkg/storage/stores/shipper/bloomshipper/block_downloader.go b/pkg/storage/stores/shipper/bloomshipper/block_downloader.go new file mode 100644 index 0000000000000..b6721db88640e --- /dev/null +++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader.go @@ -0,0 +1,230 @@ +package bloomshipper + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/pkg/queue" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config" + "github.com/grafana/loki/pkg/util" + "github.com/grafana/loki/pkg/util/constants" +) + +type blockDownloader struct { + logger log.Logger + + workingDirectory string + queueMetrics *queue.Metrics + queue *queue.RequestQueue + blockClient BlockClient + limits Limits + activeUsersService 
*util.ActiveUsersCleanupService + + ctx context.Context + manager *services.Manager + onWorkerStopCallback func() +} + +func newBlockDownloader(config config.Config, blockClient BlockClient, limits Limits, logger log.Logger, reg prometheus.Registerer) (*blockDownloader, error) { + queueMetrics := queue.NewMetrics(reg, constants.Loki, "bloom_blocks_downloader") + //add cleanup service + downloadingQueue := queue.NewRequestQueue(config.BlocksDownloadingQueue.MaxTasksEnqueuedPerTenant, time.Minute, queueMetrics) + activeUsersService := util.NewActiveUsersCleanupWithDefaultValues(queueMetrics.Cleanup) + + ctx := context.Background() + manager, err := services.NewManager(downloadingQueue, activeUsersService) + if err != nil { + return nil, fmt.Errorf("error creating service manager: %w", err) + } + err = services.StartManagerAndAwaitHealthy(ctx, manager) + if err != nil { + return nil, fmt.Errorf("error starting service manager: %w", err) + } + + b := &blockDownloader{ + ctx: ctx, + logger: logger, + workingDirectory: config.WorkingDirectory, + queueMetrics: queueMetrics, + queue: downloadingQueue, + blockClient: blockClient, + activeUsersService: activeUsersService, + limits: limits, + manager: manager, + onWorkerStopCallback: onWorkerStopNoopCallback, + } + + for i := 0; i < config.BlocksDownloadingQueue.WorkersCount; i++ { + go b.serveDownloadingTasks(fmt.Sprintf("worker-%d", i)) + } + return b, nil +} + +type BlockDownloadingTask struct { + ctx context.Context + block BlockRef + // ErrCh is a send-only channel to write an error to + ErrCh chan<- error + // ResultsCh is a send-only channel to return the block querier for the downloaded block + ResultsCh chan<- blockWithQuerier +} + +func NewBlockDownloadingTask(ctx context.Context, block BlockRef, resCh chan<- blockWithQuerier, errCh chan<- error) *BlockDownloadingTask { + return &BlockDownloadingTask{ + ctx: ctx, + block: block, + ErrCh: errCh, + ResultsCh: resCh, + } +} + +// noop implementation +var 
onWorkerStopNoopCallback = func() {} + +func (d *blockDownloader) serveDownloadingTasks(workerID string) { + logger := log.With(d.logger, "worker", workerID) + level.Debug(logger).Log("msg", "starting worker") + + d.queue.RegisterConsumerConnection(workerID) + defer d.queue.UnregisterConsumerConnection(workerID) + //this callback is used only in the tests to assert that worker is stopped + defer d.onWorkerStopCallback() + + idx := queue.StartIndexWithLocalQueue + + for { + item, newIdx, err := d.queue.Dequeue(d.ctx, idx, workerID) + if err != nil { + if !errors.Is(err, queue.ErrStopped) && !errors.Is(err, context.Canceled) { + level.Error(logger).Log("msg", "failed to dequeue task", "err", err) + continue + } + level.Info(logger).Log("msg", "stopping worker") + return + } + task, ok := item.(*BlockDownloadingTask) + if !ok { + level.Error(logger).Log("msg", "failed to cast to BlockDownloadingTask", "item", fmt.Sprintf("%+v", item), "type", fmt.Sprintf("%T", item)) + continue + } + + idx = newIdx + blockPath := task.block.BlockPath + //todo add cache before downloading + level.Debug(logger).Log("msg", "start downloading the block", "block", blockPath) + block, err := d.blockClient.GetBlock(task.ctx, task.block) + if err != nil { + level.Error(logger).Log("msg", "error downloading the block", "block", blockPath, "err", err) + task.ErrCh <- fmt.Errorf("error downloading the block %s : %w", blockPath, err) + continue + } + directory, err := d.extractBlock(&block, time.Now()) + if err != nil { + level.Error(logger).Log("msg", "error extracting the block", "block", blockPath, "err", err) + task.ErrCh <- fmt.Errorf("error extracting the block %s : %w", blockPath, err) + continue + } + level.Debug(d.logger).Log("msg", "block has been downloaded and extracted", "block", task.block.BlockPath, "directory", directory) + blockQuerier := d.createBlockQuerier(directory) + task.ResultsCh <- blockWithQuerier{ + BlockRef: task.block, + BlockQuerier: blockQuerier, + } + } +} + +func 
(d *blockDownloader) downloadBlocks(ctx context.Context, tenantID string, references []BlockRef) (chan blockWithQuerier, chan error) { + d.activeUsersService.UpdateUserTimestamp(tenantID, time.Now()) + // we need to have errCh with size that can keep max count of errors to prevent the case when + // the queue worker reported the error to this channel before the current goroutine + // and this goroutine will go to the deadlock because it won't be able to report an error + // because nothing reads this channel at this moment. + errCh := make(chan error, len(references)) + blocksCh := make(chan blockWithQuerier, len(references)) + + downloadingParallelism := d.limits.BloomGatewayBlocksDownloadingParallelism(tenantID) + for _, reference := range references { + task := NewBlockDownloadingTask(ctx, reference, blocksCh, errCh) + level.Debug(d.logger).Log("msg", "enqueuing task to download block", "block", reference.BlockPath) + err := d.queue.Enqueue(tenantID, nil, task, downloadingParallelism, nil) + if err != nil { + errCh <- fmt.Errorf("error enquing downloading task for block %s : %w", reference.BlockPath, err) + return blocksCh, errCh + } + } + return blocksCh, errCh +} + +type blockWithQuerier struct { + BlockRef + *v1.BlockQuerier +} + +// extract the files into directory and returns absolute path to this directory. 
+func (d *blockDownloader) extractBlock(block *Block, ts time.Time) (string, error) { + workingDirectoryPath := filepath.Join(d.workingDirectory, block.BlockPath, strconv.FormatInt(ts.UnixMilli(), 10)) + err := os.MkdirAll(workingDirectoryPath, os.ModePerm) + if err != nil { + return "", fmt.Errorf("can not create directory to extract the block: %w", err) + } + archivePath, err := writeDataToTempFile(workingDirectoryPath, block) + if err != nil { + return "", fmt.Errorf("error writing data to temp file: %w", err) + } + defer func() { + os.Remove(archivePath) + // todo log err + }() + err = extractArchive(archivePath, workingDirectoryPath) + if err != nil { + return "", fmt.Errorf("error extracting archive: %w", err) + } + return workingDirectoryPath, nil +} + +func (d *blockDownloader) createBlockQuerier(directory string) *v1.BlockQuerier { + reader := v1.NewDirectoryBlockReader(directory) + block := v1.NewBlock(reader) + return v1.NewBlockQuerier(block) +} + +func (d *blockDownloader) stop() { + _ = services.StopManagerAndAwaitStopped(d.ctx, d.manager) +} + +func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, error) { + defer block.Data.Close() + archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, delimiter)+1:]) + + archiveFile, err := os.Create(archivePath) + if err != nil { + return "", fmt.Errorf("error creating empty file to store the archiver: %w", err) + } + defer archiveFile.Close() + _, err = io.Copy(archiveFile, block.Data) + if err != nil { + return "", fmt.Errorf("error writing data to archive file: %w", err) + } + return archivePath, nil +} + +func extractArchive(archivePath string, workingDirectoryPath string) error { + file, err := os.Open(archivePath) + if err != nil { + return fmt.Errorf("error opening archive file %s: %w", file.Name(), err) + } + return v1.UnTarGz(workingDirectoryPath, file) +} diff --git a/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go 
b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go new file mode 100644 index 0000000000000..b69d036d30e37 --- /dev/null +++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go @@ -0,0 +1,168 @@ +package bloomshipper + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config" + "github.com/grafana/loki/pkg/validation" +) + +func Test_blockDownloader_downloadBlocks(t *testing.T) { + overrides, err := validation.NewOverrides(validation.Limits{BloomGatewayBlocksDownloadingParallelism: 20}, nil) + require.NoError(t, err) + workingDirectory := t.TempDir() + + blockReferences, blockClient := createFakeBlocks(t, 20) + blockClient.responseDelay = 100 * time.Millisecond + workersCount := 10 + downloader, err := newBlockDownloader(config.Config{ + WorkingDirectory: workingDirectory, + BlocksDownloadingQueue: config.DownloadingQueueConfig{ + WorkersCount: workersCount, + MaxTasksEnqueuedPerTenant: 20, + }, + }, blockClient, overrides, log.NewNopLogger(), prometheus.DefaultRegisterer) + stoppedWorkersCount := atomic.NewInt32(0) + downloader.onWorkerStopCallback = func() { + stoppedWorkersCount.Inc() + } + require.NoError(t, err) + blocksCh, errorsCh := downloader.downloadBlocks(context.Background(), "fake", blockReferences) + downloadedBlocks := make(map[string]any, len(blockReferences)) + done := make(chan bool) + go func() { + for i := 0; i < 20; i++ { + block := <-blocksCh + downloadedBlocks[block.BlockPath] = nil + } + done <- true + }() + + select { + //20 blocks, 10 workers, fixed delay 100ms per block: the total downloading time must be ~200ms. 
+ case <-time.After(2 * time.Second): + t.Fatalf("test must complete before the timeout") + case err := <-errorsCh: + require.NoError(t, err) + case <-done: + } + require.Len(t, downloadedBlocks, 20, "all 20 block must be downloaded") + + downloader.stop() + require.Eventuallyf(t, func() bool { + return stoppedWorkersCount.Load() == int32(workersCount) + }, 1*time.Second, 10*time.Millisecond, "expected all %d workers to be stopped", workersCount) +} + +// creates fake blocks and returns map[block-path]Block and mockBlockClient +func createFakeBlocks(t *testing.T, count int) ([]BlockRef, *mockBlockClient) { + mockData := make(map[string]Block, count) + refs := make([]BlockRef, 0, count) + for i := 0; i < count; i++ { + archive, _, _ := createBlockArchive(t) + block := Block{ + BlockRef: BlockRef{ + BlockPath: fmt.Sprintf("block-path-%d", i), + }, + Data: archive, + } + mockData[block.BlockPath] = block + refs = append(refs, block.BlockRef) + } + return refs, &mockBlockClient{mockData: mockData} +} + +type mockBlockClient struct { + responseDelay time.Duration + mockData map[string]Block +} + +func (m *mockBlockClient) GetBlock(_ context.Context, reference BlockRef) (Block, error) { + time.Sleep(m.responseDelay) + block, exists := m.mockData[reference.BlockPath] + if exists { + return block, nil + } + + return block, fmt.Errorf("block %s is not found in mockData", reference.BlockPath) +} + +func (m *mockBlockClient) PutBlocks(_ context.Context, _ []Block) ([]Block, error) { + panic("implement me") +} + +func (m *mockBlockClient) DeleteBlocks(_ context.Context, _ []BlockRef) error { + panic("implement me") +} + +func Test_blockDownloader_extractBlock(t *testing.T) { + blockFile, bloomFileContent, seriesFileContent := createBlockArchive(t) + + workingDir := t.TempDir() + downloader := &blockDownloader{workingDirectory: workingDir} + ts := time.Now().UTC() + block := Block{ + BlockRef: BlockRef{BlockPath: 
"first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"}, + Data: blockFile, + } + + actualPath, err := downloader.extractBlock(&block, ts) + + require.NoError(t, err) + expectedPath := filepath.Join(workingDir, block.BlockPath, strconv.FormatInt(ts.UnixMilli(), 10)) + require.Equal(t, expectedPath, actualPath, + "expected archive to be extracted to working directory under the same path as blockPath and with timestamp suffix") + require.FileExists(t, filepath.Join(expectedPath, v1.BloomFileName)) + require.FileExists(t, filepath.Join(expectedPath, v1.SeriesFileName)) + + actualBloomFileContent, err := os.ReadFile(filepath.Join(expectedPath, v1.BloomFileName)) + require.NoError(t, err) + require.Equal(t, bloomFileContent, string(actualBloomFileContent)) + + actualSeriesFileContent, err := os.ReadFile(filepath.Join(expectedPath, v1.SeriesFileName)) + require.NoError(t, err) + require.Equal(t, seriesFileContent, string(actualSeriesFileContent)) +} + +func createBlockArchive(t *testing.T) (*os.File, string, string) { + dir := t.TempDir() + mockBlockDir := filepath.Join(dir, "mock-block-dir") + err := os.MkdirAll(mockBlockDir, 0777) + require.NoError(t, err) + bloomFile, err := os.Create(filepath.Join(mockBlockDir, v1.BloomFileName)) + require.NoError(t, err) + bloomFileContent := uuid.NewString() + _, err = io.Copy(bloomFile, bytes.NewReader([]byte(bloomFileContent))) + require.NoError(t, err) + + seriesFile, err := os.Create(filepath.Join(mockBlockDir, v1.SeriesFileName)) + require.NoError(t, err) + seriesFileContent := uuid.NewString() + _, err = io.Copy(seriesFile, bytes.NewReader([]byte(seriesFileContent))) + require.NoError(t, err) + + blockFilePath := filepath.Join(dir, "test-block-archive") + file, err := os.OpenFile(blockFilePath, os.O_CREATE|os.O_RDWR, 0700) + require.NoError(t, err) + err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir)) + require.NoError(t, err) + + blockFile, err := os.OpenFile(blockFilePath, os.O_RDONLY, 0700) + 
require.NoError(t, err) + return blockFile, bloomFileContent, seriesFileContent +} diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go index a68959e1d908e..5709bf8866f21 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client.go +++ b/pkg/storage/stores/shipper/bloomshipper/client.go @@ -79,7 +79,7 @@ type Block struct { } type BlockClient interface { - GetBlocks(ctx context.Context, references []BlockRef) (chan Block, chan error) + GetBlock(ctx context.Context, reference BlockRef) (Block, error) PutBlocks(ctx context.Context, blocks []Block) ([]Block, error) DeleteBlocks(ctx context.Context, blocks []BlockRef) error } @@ -190,42 +190,23 @@ func (b *BloomClient) DeleteMeta(ctx context.Context, meta Meta) error { return b.periodicObjectClients[periodFrom].DeleteObject(ctx, key) } -// GetBlocks downloads all the blocks from objectStorage in parallel and sends the downloaded blocks -// via the channel Block that is closed only if all the blocks are downloaded without errors. -// If an error happens, the error will be sent via error channel. 
-func (b *BloomClient) GetBlocks(ctx context.Context, references []BlockRef) (chan Block, chan error) { - blocksChannel := make(chan Block, len(references)) - errChannel := make(chan error) - go func() { - //todo move concurrency to the config - err := concurrency.ForEachJob(ctx, len(references), 100, func(ctx context.Context, idx int) error { - reference := references[idx] - period, err := findPeriod(b.periodicConfigs, reference.StartTimestamp) - if err != nil { - return fmt.Errorf("error while period lookup: %w", err) - } - objectClient := b.periodicObjectClients[period] - readCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref)) - if err != nil { - return fmt.Errorf("error while fetching object from storage: %w", err) - } - blocksChannel <- Block{ - BlockRef: reference, - Data: readCloser, - } - return nil - }) - if err != nil { - errChannel <- fmt.Errorf("error downloading block file: %w", err) - return - } - //close blocks channel only if there is no error - close(blocksChannel) - }() - return blocksChannel, errChannel +// GetBlock downloads the blocks from objectStorage and returns the downloaded block +func (b *BloomClient) GetBlock(ctx context.Context, reference BlockRef) (Block, error) { + period, err := findPeriod(b.periodicConfigs, reference.StartTimestamp) + if err != nil { + return Block{}, fmt.Errorf("error while period lookup: %w", err) + } + objectClient := b.periodicObjectClients[period] + readCloser, _, err := objectClient.GetObject(ctx, createBlockObjectKey(reference.Ref)) + if err != nil { + return Block{}, fmt.Errorf("error while fetching object from storage: %w", err) + } + return Block{ + BlockRef: reference, + Data: readCloser, + }, nil } -// TODO zip (archive) blocks before uploading to storage func (b *BloomClient) PutBlocks(ctx context.Context, blocks []Block) ([]Block, error) { results := make([]Block, len(blocks)) //todo move concurrency to the config diff --git 
a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go index 4c4b6f855a8ec..7267856a43155 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go @@ -32,7 +32,7 @@ var ( ) func Test_BloomClient_GetMetas(t *testing.T) { - shipper := createShipper(t) + shipper := createClient(t) var expected []Meta folder1 := shipper.storageConfig.NamedStores.Filesystem["folder-1"].Directory @@ -99,12 +99,12 @@ func Test_BloomClient_PutMeta(t *testing.T) { } for name, data := range tests { t.Run(name, func(t *testing.T) { - shipper := createShipper(t) + bloomClient := createClient(t) - err := shipper.PutMeta(context.Background(), data.source) + err := bloomClient.PutMeta(context.Background(), data.source) require.NoError(t, err) - directory := shipper.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory + directory := bloomClient.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory filePath := filepath.Join(directory, data.expectedFilePath) require.FileExists(t, filePath) content, err := os.ReadFile(filePath) @@ -155,15 +155,15 @@ func Test_BloomClient_DeleteMeta(t *testing.T) { } for name, data := range tests { t.Run(name, func(t *testing.T) { - shipper := createShipper(t) - directory := shipper.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory + bloomClient := createClient(t) + directory := bloomClient.storageConfig.NamedStores.Filesystem[data.expectedStorage].Directory file := filepath.Join(directory, data.expectedFilePath) err := os.MkdirAll(file[:strings.LastIndex(file, delimiter)], 0755) require.NoError(t, err) err = os.WriteFile(file, []byte("dummy content"), 0700) require.NoError(t, err) - err = shipper.DeleteMeta(context.Background(), data.source) + err = bloomClient.DeleteMeta(context.Background(), data.source) require.NoError(t, err) require.NoFileExists(t, file) @@ -173,8 +173,8 @@ func 
Test_BloomClient_DeleteMeta(t *testing.T) { } func Test_BloomClient_GetBlocks(t *testing.T) { - shipper := createShipper(t) - fsNamedStores := shipper.storageConfig.NamedStores.Filesystem + bloomClient := createClient(t) + fsNamedStores := bloomClient.storageConfig.NamedStores.Filesystem firstBlockPath := "bloom/first-period-19621/tenantA/blooms/eeee-ffff/1695272400-1695276000-1" firstBlockFullPath := filepath.Join(fsNamedStores["folder-1"].Directory, firstBlockPath) firstBlockData := createBlockFile(t, firstBlockFullPath) @@ -209,44 +209,21 @@ func Test_BloomClient_GetBlocks(t *testing.T) { BlockPath: secondBlockPath, } - blocksToDownload := []BlockRef{firstBlockRef, secondBlockRef} - - blocksCh, errorsCh := shipper.GetBlocks(context.Background(), blocksToDownload) - blocks := make(map[string]string) - func() { - timout := time.After(5 * time.Second) - for { - select { - case <-timout: - t.Fatalf("the test had to be completed before the timeout") - return - case err := <-errorsCh: - require.NoError(t, err) - case block, ok := <-blocksCh: - if !ok { - return - } - blockData, err := io.ReadAll(block.Data) - require.NoError(t, err) - blocks[block.BlockRef.BlockPath] = string(blockData) - - } - } - }() - - firstBlockActualData, exists := blocks[firstBlockRef.BlockPath] - require.Truef(t, exists, "data for the first block must be present in the results: %+v", blocks) - require.Equal(t, firstBlockData, firstBlockActualData) - - secondBlockActualData, exists := blocks[secondBlockRef.BlockPath] - require.True(t, exists, "data for the second block must be present in the results: %+v", blocks) - require.Equal(t, secondBlockData, secondBlockActualData) + downloadedFirstBlock, err := bloomClient.GetBlock(context.Background(), firstBlockRef) + require.NoError(t, err) + firstBlockActualData, err := io.ReadAll(downloadedFirstBlock.Data) + require.NoError(t, err) + require.Equal(t, firstBlockData, string(firstBlockActualData)) - require.Len(t, blocks, 2) + downloadedSecondBlock, 
err := bloomClient.GetBlock(context.Background(), secondBlockRef) + require.NoError(t, err) + secondBlockActualData, err := io.ReadAll(downloadedSecondBlock.Data) + require.NoError(t, err) + require.Equal(t, secondBlockData, string(secondBlockActualData)) } func Test_BloomClient_PutBlocks(t *testing.T) { - shipper := createShipper(t) + bloomClient := createClient(t) blockForFirstFolderData := "data1" blockForFirstFolder := Block{ BlockRef: BlockRef{ @@ -281,7 +258,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { Data: aws_io.ReadSeekNopCloser{ReadSeeker: bytes.NewReader([]byte(blockForSecondFolderData))}, } - results, err := shipper.PutBlocks(context.Background(), []Block{blockForFirstFolder, blockForSecondFolder}) + results, err := bloomClient.PutBlocks(context.Background(), []Block{blockForFirstFolder, blockForSecondFolder}) require.NoError(t, err) require.Len(t, results, 2) firstResultBlock := results[0] @@ -295,7 +272,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { require.Equal(t, blockForFirstFolder.EndTimestamp, firstResultBlock.EndTimestamp) require.Equal(t, blockForFirstFolder.Checksum, firstResultBlock.Checksum) require.Equal(t, blockForFirstFolder.IndexPath, firstResultBlock.IndexPath) - folder1 := shipper.storageConfig.NamedStores.Filesystem["folder-1"].Directory + folder1 := bloomClient.storageConfig.NamedStores.Filesystem["folder-1"].Directory savedFilePath := filepath.Join(folder1, path) require.FileExists(t, savedFilePath) savedData, err := os.ReadFile(savedFilePath) @@ -313,7 +290,7 @@ func Test_BloomClient_PutBlocks(t *testing.T) { require.Equal(t, blockForSecondFolder.EndTimestamp, secondResultBlock.EndTimestamp) require.Equal(t, blockForSecondFolder.Checksum, secondResultBlock.Checksum) require.Equal(t, blockForSecondFolder.IndexPath, secondResultBlock.IndexPath) - folder2 := shipper.storageConfig.NamedStores.Filesystem["folder-2"].Directory + folder2 := bloomClient.storageConfig.NamedStores.Filesystem["folder-2"].Directory 
savedFilePath = filepath.Join(folder2, path) require.FileExists(t, savedFilePath) @@ -323,8 +300,8 @@ func Test_BloomClient_PutBlocks(t *testing.T) { } func Test_BloomClient_DeleteBlocks(t *testing.T) { - shipper := createShipper(t) - fsNamedStores := shipper.storageConfig.NamedStores.Filesystem + bloomClient := createClient(t) + fsNamedStores := bloomClient.storageConfig.NamedStores.Filesystem block1Path := filepath.Join(fsNamedStores["folder-1"].Directory, "bloom/first-period-19621/tenantA/blooms/eeee-ffff/1695272400-1695276000-1") createBlockFile(t, block1Path) block2Path := filepath.Join(fsNamedStores["folder-2"].Directory, "bloom/second-period-19624/tenantA/blooms/aaaa-bbbb/1695531600-1695535200-2") @@ -358,7 +335,7 @@ func Test_BloomClient_DeleteBlocks(t *testing.T) { IndexPath: uuid.New().String(), }, } - err := shipper.DeleteBlocks(context.Background(), blocksToDelete) + err := bloomClient.DeleteBlocks(context.Background(), blocksToDelete) require.NoError(t, err) require.NoFileExists(t, block1Path) require.NoFileExists(t, block2Path) @@ -500,7 +477,7 @@ func Test_createMetaRef(t *testing.T) { } } -func createShipper(t *testing.T) *BloomClient { +func createClient(t *testing.T) *BloomClient { periodicConfigs := createPeriodConfigs() namedStores := storage.NamedStores{ Filesystem: map[string]storage.NamedFSConfig{ @@ -513,9 +490,9 @@ func createShipper(t *testing.T) *BloomClient { metrics := storage.NewClientMetrics() t.Cleanup(metrics.Unregister) - bshipper, err := NewBloomClient(periodicConfigs, storageConfig, metrics) + bloomClient, err := NewBloomClient(periodicConfigs, storageConfig, metrics) require.NoError(t, err) - return bshipper + return bloomClient } func createPeriodConfigs() []config.PeriodConfig { diff --git a/pkg/storage/stores/shipper/bloomshipper/config/config.go b/pkg/storage/stores/shipper/bloomshipper/config/config.go index 7e9ab787ff3ab..748e037ca57c1 100644 --- a/pkg/storage/stores/shipper/bloomshipper/config/config.go +++ 
b/pkg/storage/stores/shipper/bloomshipper/config/config.go @@ -8,11 +8,23 @@ import ( ) type Config struct { - WorkingDirectory string `yaml:"working_directory"` + WorkingDirectory string `yaml:"working_directory"` + BlocksDownloadingQueue DownloadingQueueConfig `yaml:"blocks_downloading_queue"` +} + +type DownloadingQueueConfig struct { + WorkersCount int `yaml:"workers_count"` + MaxTasksEnqueuedPerTenant int `yaml:"max_tasks_enqueued_per_tenant"` +} + +func (cfg *DownloadingQueueConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.IntVar(&cfg.WorkersCount, prefix+"workers-count", 100, "The count of parallel workers that download Bloom Blocks.") + f.IntVar(&cfg.MaxTasksEnqueuedPerTenant, prefix+"max_tasks_enqueued_per_tenant", 10_000, "Maximum number of task in queue per tenant per bloom-gateway. Enqueuing the tasks above this limit will fail an error.") } func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&c.WorkingDirectory, prefix+"shipper.working-directory", "bloom-shipper", "Working directory to store downloaded Bloom Blocks.") + c.BlocksDownloadingQueue.RegisterFlagsWithPrefix(prefix+"shipper.blocks-downloading-queue.", f) } func (c *Config) Validate() error { diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go index 2df1f41cd4a25..98dbbb20a476a 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go @@ -4,32 +4,38 @@ import ( "cmp" "context" "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" - v1 "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config" ) type Shipper struct { - client Client - config config.Config - logger log.Logger + client Client + config 
config.Config + logger log.Logger + blockDownloader *blockDownloader } -func NewShipper(client Client, config config.Config, logger log.Logger) (*Shipper, error) { +type Limits interface { + BloomGatewayBlocksDownloadingParallelism(tenantID string) int +} + +func NewShipper(client Client, config config.Config, limits Limits, logger log.Logger, reg prometheus.Registerer) (*Shipper, error) { + logger = log.With(logger, "component", "bloom-shipper") + downloader, err := newBlockDownloader(config, client, limits, logger, reg) + if err != nil { + return nil, fmt.Errorf("error creating block downloader: %w", err) + } return &Shipper{ - client: client, - config: config, - logger: log.With(logger, "component", "bloom-shipper"), + client: client, + config: config, + logger: logger, + blockDownloader: downloader, }, nil } @@ -47,21 +53,18 @@ func (s *Shipper) ForEachBlock( return fmt.Errorf("error fetching active block references : %w", err) } - blocksChannel, errorsChannel := s.client.GetBlocks(ctx, blockRefs) + cancelContext, cancelFunc := context.WithCancel(ctx) + defer cancelFunc() + blocksChannel, errorsChannel := s.blockDownloader.downloadBlocks(cancelContext, tenantID, blockRefs) for { select { - case block, ok := <-blocksChannel: + case result, ok := <-blocksChannel: if !ok { return nil } - directory, err := s.extractBlock(&block, time.Now().UTC()) + err = callback(result.BlockQuerier) if err != nil { - return fmt.Errorf("error unarchiving block %s err: %w", block.BlockPath, err) - } - blockQuerier := s.createBlockQuerier(directory) - err = callback(blockQuerier) - if err != nil { - return fmt.Errorf("error running callback function for block %s err: %w", block.BlockPath, err) + return fmt.Errorf("error running callback function for block %s err: %w", result.BlockPath, err) } case err := <-errorsChannel: if err != nil { @@ -73,6 +76,7 @@ func (s *Shipper) ForEachBlock( func (s *Shipper) Stop() { s.client.Stop() + s.blockDownloader.stop() } // getFromThrough returns 
the first and list item of a fingerprint slice @@ -177,55 +181,3 @@ func isOutsideRange(b *BlockRef, startTimestamp, endTimestamp int64, fingerprint } return b.MaxFingerprint < fingerprints[idx] } - -// extract the files into directory and returns absolute path to this directory. -func (s *Shipper) extractBlock(block *Block, ts time.Time) (string, error) { - workingDirectoryPath := filepath.Join(s.config.WorkingDirectory, block.BlockPath, strconv.FormatInt(ts.UnixMilli(), 10)) - err := os.MkdirAll(workingDirectoryPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("can not create directory to extract the block: %w", err) - } - archivePath, err := writeDataToTempFile(workingDirectoryPath, block) - if err != nil { - return "", fmt.Errorf("error writing data to temp file: %w", err) - } - defer func() { - os.Remove(archivePath) - // todo log err - }() - err = extractArchive(archivePath, workingDirectoryPath) - if err != nil { - return "", fmt.Errorf("error extracting archive: %w", err) - } - return workingDirectoryPath, nil -} - -func (s *Shipper) createBlockQuerier(directory string) *v1.BlockQuerier { - reader := v1.NewDirectoryBlockReader(directory) - block := v1.NewBlock(reader) - return v1.NewBlockQuerier(block) -} - -func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, error) { - defer block.Data.Close() - archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, delimiter)+1:]) - - archiveFile, err := os.Create(archivePath) - if err != nil { - return "", fmt.Errorf("error creating empty file to store the archiver: %w", err) - } - defer archiveFile.Close() - _, err = io.Copy(archiveFile, block.Data) - if err != nil { - return "", fmt.Errorf("error writing data to archive file: %w", err) - } - return archivePath, nil -} - -func extractArchive(archivePath string, workingDirectoryPath string) error { - file, err := os.Open(archivePath) - if err != nil { - return fmt.Errorf("error opening 
archive file %s: %w", file.Name(), err) - } - return v1.UnTarGz(workingDirectoryPath, file) -} diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go index 45450c0e3838b..17f21793680ca 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go @@ -1,21 +1,11 @@ package bloomshipper import ( - "bytes" "fmt" - "io" "math" - "os" - "path/filepath" - "strconv" "testing" - "time" - "github.com/google/uuid" "github.com/stretchr/testify/require" - - v1 "github.com/grafana/loki/pkg/storage/bloom/v1" - "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config" ) func Test_Shipper_findBlocks(t *testing.T) { @@ -208,61 +198,3 @@ func createBlockRef( BlockPath: blockPath, } } - -const ( - bloomFileName = "bloom" - seriesFileName = "series" -) - -func Test_Shipper_extractBlock(t *testing.T) { - dir := t.TempDir() - - mockBlockDir := filepath.Join(dir, "mock-block-dir") - err := os.MkdirAll(mockBlockDir, 0777) - require.NoError(t, err) - bloomFile, err := os.Create(filepath.Join(mockBlockDir, bloomFileName)) - require.NoError(t, err) - bloomFileContent := uuid.NewString() - _, err = io.Copy(bloomFile, bytes.NewReader([]byte(bloomFileContent))) - require.NoError(t, err) - - seriesFile, err := os.Create(filepath.Join(mockBlockDir, seriesFileName)) - require.NoError(t, err) - seriesFileContent := uuid.NewString() - _, err = io.Copy(seriesFile, bytes.NewReader([]byte(seriesFileContent))) - require.NoError(t, err) - - blockFilePath := filepath.Join(dir, "test-block-archive") - file, err := os.OpenFile(blockFilePath, os.O_CREATE|os.O_RDWR, 0700) - require.NoError(t, err) - err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir)) - require.NoError(t, err) - - blockFile, err := os.OpenFile(blockFilePath, os.O_RDONLY, 0700) - require.NoError(t, err) - - workingDir := t.TempDir() - shipper := Shipper{config: 
config.Config{WorkingDirectory: workingDir}} - ts := time.Now().UTC() - block := Block{ - BlockRef: BlockRef{BlockPath: "first-period-19621/tenantA/metas/ff-fff-1695272400-1695276000-aaa"}, - Data: blockFile, - } - - actualPath, err := shipper.extractBlock(&block, ts) - - require.NoError(t, err) - expectedPath := filepath.Join(workingDir, block.BlockPath, strconv.FormatInt(ts.UnixMilli(), 10)) - require.Equal(t, expectedPath, actualPath, - "expected archive to be extracted to working directory under the same path as blockPath and with timestamp suffix") - require.FileExists(t, filepath.Join(expectedPath, bloomFileName)) - require.FileExists(t, filepath.Join(expectedPath, seriesFileName)) - - actualBloomFileContent, err := os.ReadFile(filepath.Join(expectedPath, bloomFileName)) - require.NoError(t, err) - require.Equal(t, bloomFileContent, string(actualBloomFileContent)) - - actualSeriesFileContent, err := os.ReadFile(filepath.Join(expectedPath, seriesFileName)) - require.NoError(t, err) - require.Equal(t, seriesFileContent, string(actualSeriesFileContent)) -} diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 250fa9575c470..c4e38a898d2c6 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -182,13 +182,14 @@ type Limits struct { BloomGatewayShardSize int `yaml:"bloom_gateway_shard_size" json:"bloom_gateway_shard_size"` BloomGatewayEnabled bool `yaml:"bloom_gateway_enable_filtering" json:"bloom_gateway_enable_filtering"` - BloomCompactorShardSize int `yaml:"bloom_compactor_shard_size" json:"bloom_compactor_shard_size"` - BloomCompactorMaxTableAge time.Duration `yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"` - BloomCompactorMinTableAge time.Duration `yaml:"bloom_compactor_min_table_age" json:"bloom_compactor_min_table_age"` - BloomCompactorEnabled bool `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"` - BloomNGramLength int `yaml:"bloom_ngram_length" 
json:"bloom_ngram_length"` - BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip"` - BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate"` + BloomCompactorShardSize int `yaml:"bloom_compactor_shard_size" json:"bloom_compactor_shard_size"` + BloomCompactorMaxTableAge time.Duration `yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"` + BloomCompactorMinTableAge time.Duration `yaml:"bloom_compactor_min_table_age" json:"bloom_compactor_min_table_age"` + BloomCompactorEnabled bool `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"` + BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length"` + BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip"` + BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate"` + BloomGatewayBlocksDownloadingParallelism int `yaml:"bloom_gateway_blocks_downloading_parallelism" json:"bloom_gateway_blocks_downloading_parallelism"` AllowStructuredMetadata bool `yaml:"allow_structured_metadata,omitempty" json:"allow_structured_metadata,omitempty" doc:"description=Allow user to send structured metadata in push payload."` MaxStructuredMetadataSize flagext.ByteSize `yaml:"max_structured_metadata_size" json:"max_structured_metadata_size" doc:"description=Maximum size accepted for structured metadata per log line."` @@ -309,6 +310,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.BloomNGramLength, "bloom-compactor.ngram-length", 4, "Length of the n-grams created when computing blooms from log lines.") f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 0, "Skip factor for the n-grams created when computing blooms from log lines.") f.Float64Var(&l.BloomFalsePositiveRate, "bloom-compactor.false-positive-rate", 0.01, "Scalable Bloom Filter desired false-positive rate.") + f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, 
"bloom-gateway.blocks-downloading-parallelism", 50, "Maximum number of blocks will be downloaded in parallel by the Bloom Gateway.") l.ShardStreams = &shardstreams.Config{} l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f) @@ -788,6 +790,10 @@ func (o *Overrides) BloomGatewayShardSize(userID string) int { return o.getOverridesForUser(userID).BloomGatewayShardSize } +func (o *Overrides) BloomGatewayBlocksDownloadingParallelism(userID string) int { + return o.getOverridesForUser(userID).BloomGatewayBlocksDownloadingParallelism +} + func (o *Overrides) BloomGatewayEnabled(userID string) bool { return o.getOverridesForUser(userID).BloomGatewayEnabled } From e523809216086b4fa4cb9f0c5a058b273bc5dbbf Mon Sep 17 00:00:00 2001 From: Danny Kopping Date: Fri, 24 Nov 2023 16:20:27 +0200 Subject: [PATCH 46/48] bug(volume): return 400 error for invalid volume request (#11313) **What this PR does / why we need it**: Making a request to the `/volume{,_range}` endpoints results in a 500 is a LogQL expression is given instead of the expected label matchers. The reason for this is that the error is not wrapped in such a way as to be interpreted by `ClientHTTPStatusAndError` correctly. 5xx responses from the querier are automatically retried, so any invalid request currently results in 5 retries which are unnecessary. 
Signed-off-by: Danny Kopping --- clients/pkg/logentry/logql/parser.go | 5 +++-- pkg/logql/syntax/parser.go | 2 +- pkg/logqlmodel/error.go | 1 + pkg/util/server/error.go | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/clients/pkg/logentry/logql/parser.go b/clients/pkg/logentry/logql/parser.go index e5ee61227a9f7..d567f6fce4c8b 100644 --- a/clients/pkg/logentry/logql/parser.go +++ b/clients/pkg/logentry/logql/parser.go @@ -1,13 +1,14 @@ package logql import ( - "errors" "fmt" "strconv" "strings" "text/scanner" "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logqlmodel" ) func init() { @@ -44,7 +45,7 @@ func ParseMatchers(input string) ([]*labels.Matcher, error) { } matcherExpr, ok := expr.(*matchersExpr) if !ok { - return nil, errors.New("only label matchers is supported") + return nil, logqlmodel.ErrParseMatchers } return matcherExpr.matchers, nil } diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go index 81874ba6d6c41..710bf7132c4c8 100644 --- a/pkg/logql/syntax/parser.go +++ b/pkg/logql/syntax/parser.go @@ -146,7 +146,7 @@ func ParseMatchers(input string, validate bool) ([]*labels.Matcher, error) { } matcherExpr, ok := expr.(*MatchersExpr) if !ok { - return nil, errors.New("only label matchers is supported") + return nil, logqlmodel.ErrParseMatchers } return matcherExpr.Mts, nil } diff --git a/pkg/logqlmodel/error.go b/pkg/logqlmodel/error.go index 9491a8f3342c1..68ddf72cc2f2d 100644 --- a/pkg/logqlmodel/error.go +++ b/pkg/logqlmodel/error.go @@ -15,6 +15,7 @@ var ( ErrLimit = errors.New("limit reached while evaluating the query") ErrIntervalLimit = errors.New("[interval] value exceeds limit") ErrBlocked = errors.New("query blocked by policy") + ErrParseMatchers = errors.New("only label matchers are supported") ErrorLabel = "__error__" PreserveErrorLabel = "__preserve_error__" ErrorDetailsLabel = "__error_details__" diff --git a/pkg/util/server/error.go b/pkg/util/server/error.go index 
ef4dedec93094..df2beaea2b0b5 100644 --- a/pkg/util/server/error.go +++ b/pkg/util/server/error.go @@ -56,7 +56,7 @@ func ClientHTTPStatusAndError(err error) (int, error) { return http.StatusGatewayTimeout, errors.New(ErrDeadlineExceeded) case errors.As(err, &queryErr): return http.StatusBadRequest, err - case errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline) || errors.Is(err, logqlmodel.ErrBlocked): + case errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline) || errors.Is(err, logqlmodel.ErrBlocked) || errors.Is(err, logqlmodel.ErrParseMatchers): return http.StatusBadRequest, err case errors.Is(err, user.ErrNoOrgID): return http.StatusBadRequest, err From 09cb9ae76f4aef7dea477961c0c5424d7243bf2a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 24 Nov 2023 14:41:01 +0000 Subject: [PATCH 47/48] fix(deps): update github.com/grafana/loki/pkg/push digest to e523809 (main) (#11107) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate logo banner](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [github.com/grafana/loki/pkg/push](https://togithub.com/grafana/loki) | require | digest | `0a7737e` -> `e523809` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. 
--- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/grafana/loki). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- vendor/modules.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index cf7b393e8b44d..75c6f559393fd 100644 --- a/go.mod +++ b/go.mod @@ -123,7 +123,7 @@ require ( github.com/efficientgo/core v1.0.0-rc.2 github.com/fsnotify/fsnotify v1.6.0 github.com/gogo/googleapis v1.4.0 - github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb + github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 github.com/heroku/x v0.0.61 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0 diff --git a/vendor/modules.txt b/vendor/modules.txt index 76a30c5a6689e..bd4bf7d795a33 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -903,7 +903,7 @@ github.com/grafana/go-gelf/v2/gelf # github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache -# github.com/grafana/loki/pkg/push v0.0.0-20231023154132-0a7737e7c7eb => ./pkg/push +# github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 => ./pkg/push ## explicit; go 1.19 github.com/grafana/loki/pkg/push # github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd => github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd From d62d4e37d1f3dba83cf10a1f6db82830794e1c05 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Fri, 24 Nov 2023 15:56:42 +0100 Subject: [PATCH 48/48] Bloom Gateway: Implement chunk filtering using workers that multiplex requests (#11181) This change adds an internal request queue to the bloom gateway. 
Instead of executing every single request individually, which involves resolving bloom blocks, downloading them if needed and executing the chunk filtering, requests are now enqueued to the internal, per-tenant queue. The queue implements the same shuffle sharding mechanism as the queue in the query scheduler component. Workers then dequeue a batch of requests for a single tenant and multiplex them into a single processing task for each day. This has the big advantage that the chunks of multiple requests can be processed in a single sequential scan through a set a bloom blocks, without needing to skip back and forth within the binary stream of the block. --------- Signed-off-by: Christian Haudum --- pkg/bloomgateway/bloomgateway.go | 217 ++++++++--------- pkg/bloomgateway/bloomgateway_test.go | 218 +++++++++++++---- pkg/bloomgateway/multiplexing.go | 221 +++++++++++++++++ pkg/bloomgateway/multiplexing_test.go | 203 ++++++++++++++++ pkg/bloomgateway/util.go | 167 +++++++++++++ pkg/bloomgateway/util_test.go | 84 +++++++ pkg/bloomgateway/worker.go | 227 ++++++++++++++++++ pkg/querier/worker/worker.go | 3 +- pkg/queue/queue.go | 37 +++ pkg/queue/util.go | 25 ++ pkg/storage/bloom/v1/builder_test.go | 38 --- pkg/storage/bloom/v1/fuse.go | 121 +++------- pkg/storage/bloom/v1/fuse_test.go | 101 +++----- pkg/storage/bloom/v1/test_util.go | 81 +++++++ .../stores/shipper/bloomshipper/client.go | 14 +- .../stores/shipper/bloomshipper/shipper.go | 57 +++-- .../stores/shipper/bloomshipper/store.go | 120 ++++----- 17 files changed, 1487 insertions(+), 447 deletions(-) create mode 100644 pkg/bloomgateway/multiplexing.go create mode 100644 pkg/bloomgateway/multiplexing_test.go create mode 100644 pkg/bloomgateway/util.go create mode 100644 pkg/bloomgateway/util_test.go create mode 100644 pkg/bloomgateway/worker.go create mode 100644 pkg/queue/util.go create mode 100644 pkg/storage/bloom/v1/test_util.go diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go 
index 425d6713e92f9..6448e45324f32 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -23,6 +23,10 @@ of line filter expressions. | bloomgateway.Gateway | + queue.RequestQueue + | + bloomgateway.Worker + | bloomshipper.Store | bloomshipper.Shipper @@ -56,6 +60,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/queue" "github.com/grafana/loki/pkg/storage" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/pkg/util" @@ -63,7 +68,6 @@ import ( ) var errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring") -var errInvalidTenant = errors.New("invalid tenant in chunk refs") // TODO(chaudum): Make these configurable const ( @@ -72,22 +76,26 @@ const ( pendingTasksInitialCap = 1024 ) +const ( + metricsSubsystem = "bloom_gateway" +) + type metrics struct { queueDuration prometheus.Histogram inflightRequests prometheus.Summary } -func newMetrics(subsystem string, registerer prometheus.Registerer) *metrics { +func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) *metrics { return &metrics{ queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Namespace: constants.Loki, + Namespace: namespace, Subsystem: subsystem, Name: "queue_duration_seconds", Help: "Time spent by tasks in queue before getting picked up by a worker.", Buckets: prometheus.DefBuckets, }), inflightRequests: promauto.With(registerer).NewSummary(prometheus.SummaryOpts{ - Namespace: constants.Loki, + Namespace: namespace, Subsystem: subsystem, Name: "inflight_tasks", Help: "Number of inflight tasks (either queued or processing) sampled at a regular interval. 
Quantile buckets keep track of inflight tasks over the last 60s.", @@ -98,40 +106,6 @@ func newMetrics(subsystem string, registerer prometheus.Registerer) *metrics { } } -// Task is the data structure that is enqueued to the internal queue and queued by query workers -type Task struct { - // ID is a lexcographically sortable unique identifier of the task - ID ulid.ULID - // Tenant is the tenant ID - Tenant string - // Request is the original request - Request *logproto.FilterChunkRefRequest - // ErrCh is a send-only channel to write an error to - ErrCh chan<- error - // ResCh is a send-only channel to write partial responses to - ResCh chan<- *logproto.GroupedChunkRefs -} - -// newTask returns a new Task that can be enqueued to the task queue. -// As additional arguments, it returns a result and an error channel, as well -// as an error if the instantiation fails. -func newTask(tenantID string, req *logproto.FilterChunkRefRequest) (Task, chan *logproto.GroupedChunkRefs, chan error, error) { - key, err := ulid.New(ulid.Now(), nil) - if err != nil { - return Task{}, nil, nil, err - } - errCh := make(chan error, 1) - resCh := make(chan *logproto.GroupedChunkRefs, 1) - task := Task{ - ID: key, - Tenant: tenantID, - Request: req, - ErrCh: errCh, - ResCh: resCh, - } - return task, resCh, errCh, nil -} - // SyncMap is a map structure which can be synchronized using the RWMutex type SyncMap[k comparable, v any] struct { sync.RWMutex @@ -169,14 +143,16 @@ func makePendingTasks(n int) *pendingTasks { type Gateway struct { services.Service - cfg Config - logger log.Logger - metrics *metrics + cfg Config + logger log.Logger - queue *queue.RequestQueue - queueMetrics *queue.Metrics - activeUsers *util.ActiveUsersCleanupService - bloomStore bloomshipper.Store + metrics *metrics + workerMetrics *workerMetrics + queueMetrics *queue.Metrics + + queue *queue.RequestQueue + activeUsers *util.ActiveUsersCleanupService + bloomStore bloomshipper.Store sharding ShardingStrategy @@ -184,6 
+160,8 @@ type Gateway struct { serviceMngr *services.Manager serviceWatcher *services.FailureWatcher + + workerConfig workerConfig } // New returns a new instance of the Bloom Gateway. @@ -191,12 +169,17 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o g := &Gateway{ cfg: cfg, logger: logger, - metrics: newMetrics("bloom_gateway", reg), + metrics: newMetrics(reg, constants.Loki, metricsSubsystem), sharding: shardingStrategy, pendingTasks: makePendingTasks(pendingTasksInitialCap), + workerConfig: workerConfig{ + maxWaitTime: 200 * time.Millisecond, + maxItems: 100, + }, + workerMetrics: newWorkerMetrics(reg, constants.Loki, metricsSubsystem), + queueMetrics: queue.NewMetrics(reg, constants.Loki, metricsSubsystem), } - g.queueMetrics = queue.NewMetrics(reg, constants.Loki, "bloom_gateway") g.queue = queue.NewRequestQueue(maxTasksPerTenant, time.Minute, g.queueMetrics) g.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(g.queueMetrics.Cleanup) @@ -215,19 +198,32 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o return nil, err } + // We need to keep a reference to be able to call Stop() on shutdown of the gateway. g.bloomStore = bloomStore + if err := g.initServices(); err != nil { + return nil, err + } + g.Service = services.NewBasicService(g.starting, g.running, g.stopping).WithName("bloom-gateway") + + return g, nil +} + +func (g *Gateway) initServices() error { + var err error svcs := []services.Service{g.queue, g.activeUsers} + for i := 0; i < numWorkers; i++ { + id := fmt.Sprintf("bloom-query-worker-%d", i) + w := newWorker(id, g.workerConfig, g.queue, g.bloomStore, g.pendingTasks, g.logger, g.workerMetrics) + svcs = append(svcs, w) + } g.serviceMngr, err = services.NewManager(svcs...) 
if err != nil { - return nil, err + return err } g.serviceWatcher = services.NewFailureWatcher() g.serviceWatcher.WatchManager(g.serviceMngr) - - g.Service = services.NewBasicService(g.starting, g.running, g.stopping).WithName("bloom-gateway") - - return g, nil + return nil } func (g *Gateway) starting(ctx context.Context) error { @@ -245,10 +241,6 @@ func (g *Gateway) starting(ctx context.Context) error { return errors.Wrap(err, "unable to start bloom gateway subservices") } - for i := 0; i < numWorkers; i++ { - go g.startWorker(ctx, fmt.Sprintf("worker-%d", i)) - } - return nil } @@ -278,52 +270,6 @@ func (g *Gateway) stopping(_ error) error { return services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr) } -// This is just a dummy implementation of the worker! -// TODO(chaudum): Implement worker that dequeues multiple pending tasks and -// multiplexes them prior to execution. -func (g *Gateway) startWorker(_ context.Context, id string) error { - level.Info(g.logger).Log("msg", "starting worker", "worker", id) - - g.queue.RegisterConsumerConnection(id) - defer g.queue.UnregisterConsumerConnection(id) - - idx := queue.StartIndexWithLocalQueue - - for { - ctx := context.Background() - item, newIdx, err := g.queue.Dequeue(ctx, idx, id) - if err != nil { - if err != queue.ErrStopped { - level.Error(g.logger).Log("msg", "failed to dequeue task", "worker", id, "err", err) - continue - } - level.Info(g.logger).Log("msg", "stopping worker", "worker", id) - return err - } - task, ok := item.(Task) - if !ok { - level.Error(g.logger).Log("msg", "failed to cast to Task", "item", item) - continue - } - - idx = newIdx - level.Info(g.logger).Log("msg", "dequeued task", "worker", id, "task", task.ID) - g.pendingTasks.Delete(task.ID) - - r := task.Request - if len(r.Filters) > 0 { - r.Refs, err = g.bloomStore.FilterChunkRefs(ctx, task.Tenant, r.From.Time(), r.Through.Time(), r.Refs, r.Filters...) 
- } - if err != nil { - task.ErrCh <- err - } else { - for _, ref := range r.Refs { - task.ResCh <- ref - } - } - } -} - // FilterChunkRefs implements BloomGatewayServer func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest) (*logproto.FilterChunkRefResponse, error) { tenantID, err := tenant.TenantID(ctx) @@ -331,10 +277,11 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk return nil, err } - for _, ref := range req.Refs { - if ref.Tenant != tenantID { - return nil, errors.Wrapf(errInvalidTenant, "expected chunk refs from tenant %s, got tenant %s", tenantID, ref.Tenant) - } + // Shortcut if request does not contain filters + if len(req.Filters) == 0 { + return &logproto.FilterChunkRefResponse{ + ChunkRefs: req.Refs, + }, nil } // Sort ChunkRefs by fingerprint in ascending order @@ -342,7 +289,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk return req.Refs[i].Fingerprint < req.Refs[j].Fingerprint }) - task, resCh, errCh, err := newTask(tenantID, req) + task, resCh, errCh, err := NewTask(tenantID, req) if err != nil { return nil, err } @@ -354,19 +301,61 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk g.pendingTasks.Add(task.ID, task) }) - response := make([]*logproto.GroupedChunkRefs, 0, len(req.Refs)) + requestCount := len(req.Refs) + // TODO(chaudum): Use pool + responses := make([]v1.Output, 0, requestCount) + for { select { case <-ctx.Done(): - return nil, ctx.Err() + return nil, errors.Wrap(ctx.Err(), "waiting for results") case err := <-errCh: - return nil, err + return nil, errors.Wrap(err, "waiting for results") case res := <-resCh: - level.Info(g.logger).Log("msg", "got result", "task", task.ID, "tenant", tenantID, "res", res) + responses = append(responses, res) + // log line is helpful for debugging tests + // level.Debug(g.logger).Log("msg", "got partial result", "task", task.ID, "tenant", tenantID, "fp", 
uint64(res.Fp), "chunks", res.Removals.Len(), "progress", fmt.Sprintf("%d/%d", len(responses), requestCount)) // wait for all parts of the full response - response = append(response, res) - if len(response) == len(req.Refs) { - return &logproto.FilterChunkRefResponse{ChunkRefs: response}, nil + if len(responses) == requestCount { + for _, o := range responses { + if res.Removals.Len() == 0 { + continue + } + // we must not remove items from req.Refs as long as the worker may iterater over them + g.removeNotMatchingChunks(req, o) + } + return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil + } + } + } +} + +func (g *Gateway) removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output) { + // binary search index of fingerprint + idx := sort.Search(len(req.Refs), func(i int) bool { + return req.Refs[i].Fingerprint >= uint64(res.Fp) + }) + + // fingerprint not found + if idx >= len(req.Refs) { + level.Error(g.logger).Log("msg", "index out of range", "idx", idx, "len", len(req.Refs), "fp", uint64(res.Fp)) + return + } + + // if all chunks of a fingerprint are are removed + // then remove the whole group from the response + if len(req.Refs[idx].Refs) == res.Removals.Len() { + req.Refs[idx] = nil // avoid leaking pointer + req.Refs = append(req.Refs[:idx], req.Refs[idx+1:]...) + return + } + + for i := range res.Removals { + toRemove := res.Removals[i] + for j := range req.Refs[idx].Refs { + if toRemove.Checksum == req.Refs[idx].Refs[j].Checksum { + req.Refs[idx].Refs[j] = nil // avoid leaking pointer + req.Refs[idx].Refs = append(req.Refs[idx].Refs[:j], req.Refs[idx].Refs[j+1:]...) 
} } } diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go index 0b6a207362ac6..a294500ce27bd 100644 --- a/pkg/bloomgateway/bloomgateway_test.go +++ b/pkg/bloomgateway/bloomgateway_test.go @@ -2,11 +2,13 @@ package bloomgateway import ( "context" + "fmt" "os" "testing" "time" "github.com/go-kit/log" + "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/kv/consul" "github.com/grafana/dskit/ring" @@ -18,9 +20,12 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/storage" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/chunk/client/local" "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" lokiring "github.com/grafana/loki/pkg/util/ring" + "github.com/grafana/loki/pkg/validation" ) func parseDayTime(s string) config.DayTime { @@ -33,17 +38,35 @@ func parseDayTime(s string) config.DayTime { } } +func mktime(s string) model.Time { + ts, err := time.Parse("2006-01-02 15:04", s) + if err != nil { + panic(err) + } + return model.TimeFromUnix(ts.Unix()) +} + func groupRefs(t *testing.T, chunkRefs []*logproto.ChunkRef) []*logproto.GroupedChunkRefs { t.Helper() grouped := make([]*logproto.GroupedChunkRefs, 0, len(chunkRefs)) return groupChunkRefs(chunkRefs, grouped) } +func newLimits() *validation.Overrides { + limits := validation.Limits{} + flagext.DefaultValues(&limits) + limits.BloomGatewayEnabled = true + + overrides, _ := validation.NewOverrides(limits, nil) + return overrides +} + func TestBloomGateway_StartStopService(t *testing.T) { ss := NewNoopStrategy() logger := log.NewNopLogger() reg := prometheus.NewRegistry() + limits := newLimits() cm := storage.NewClientMetrics() t.Cleanup(cm.Unregister) @@ -82,7 +105,7 @@ func TestBloomGateway_StartStopService(t *testing.T) { }, } - gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) + gw, err 
:= New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg) require.NoError(t, err) err = services.StartAndAwaitRunning(context.Background(), gw) @@ -103,6 +126,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { ss := NewNoopStrategy() logger := log.NewLogfmtLogger(os.Stderr) reg := prometheus.NewRegistry() + limits := newLimits() cm := storage.NewClientMetrics() t.Cleanup(cm.Unregister) @@ -142,7 +166,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { t.Run("returns unfiltered chunk refs if no filters provided", func(t *testing.T) { reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) + gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg) require.NoError(t, err) err = services.StartAndAwaitRunning(context.Background(), gw) @@ -152,8 +176,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { require.NoError(t, err) }) - ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00") - now := model.TimeFromUnix(ts.Unix()) + now := mktime("2023-10-03 10:00") chunkRefs := []*logproto.ChunkRef{ {Fingerprint: 3000, UserID: tenantID, From: now.Add(-24 * time.Hour), Through: now.Add(-23 * time.Hour), Checksum: 1}, @@ -186,33 +209,9 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { }, res) }) - t.Run("returns error if chunk refs do not belong to tenant", func(t *testing.T) { - reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) - require.NoError(t, err) - - ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00") - now := model.TimeFromUnix(ts.Unix()) - - chunkRefs := []*logproto.ChunkRef{ - {Fingerprint: 1000, UserID: tenantID, From: now.Add(-22 * time.Hour), Through: now.Add(-21 * time.Hour), Checksum: 1}, - {Fingerprint: 2000, UserID: "other", From: now.Add(-20 * time.Hour), Through: now.Add(-19 * time.Hour), Checksum: 2}, - } - req := &logproto.FilterChunkRefRequest{ - From: now.Add(-24 * time.Hour), - 
Through: now, - Refs: groupRefs(t, chunkRefs), - } - - ctx := user.InjectOrgID(context.Background(), tenantID) - _, err = gw.FilterChunkRefs(ctx, req) - require.Error(t, err) - require.Equal(t, "expected chunk refs from tenant test, got tenant other: invalid tenant in chunk refs", err.Error()) - }) - t.Run("gateway tracks active users", func(t *testing.T) { reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, fakeLimits{}, ss, cm, logger, reg) + gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg) require.NoError(t, err) err = services.StartAndAwaitRunning(context.Background(), gw) @@ -222,8 +221,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { require.NoError(t, err) }) - ts, _ := time.Parse("2006-01-02 15:04", "2023-10-03 10:00") - now := model.TimeFromUnix(ts.Unix()) + now := mktime("2023-10-03 10:00") tenants := []string{"tenant-a", "tenant-b", "tenant-c"} for idx, tenantID := range tenants { @@ -240,6 +238,9 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { From: now.Add(-24 * time.Hour), Through: now, Refs: groupRefs(t, chunkRefs), + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: "foo"}, + }, } ctx := user.InjectOrgID(context.Background(), tenantID) _, err = gw.FilterChunkRefs(ctx, req) @@ -247,22 +248,157 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { } require.ElementsMatch(t, tenants, gw.activeUsers.ActiveUsers()) }) + + t.Run("use fuse queriers to filter chunks", func(t *testing.T) { + reg := prometheus.NewRegistry() + gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg) + require.NoError(t, err) + + now := mktime("2023-10-03 10:00") + + // replace store implementation and re-initialize workers and sub-services + bqs, data := createBlockQueriers(t, 5, now.Add(-8*time.Hour), now, 0, 1024) + gw.bloomStore = newMockBloomStore(bqs) + err = gw.initServices() + require.NoError(t, err) + + err = services.StartAndAwaitRunning(context.Background(), gw) + 
require.NoError(t, err) + t.Cleanup(func() { + err = services.StopAndAwaitTerminated(context.Background(), gw) + require.NoError(t, err) + }) + + chunkRefs := createQueryInputFromBlockData(t, tenantID, data, 100) + + t.Run("no match - return empty response", func(t *testing.T) { + inputChunkRefs := groupRefs(t, chunkRefs) + req := &logproto.FilterChunkRefRequest{ + From: now.Add(-8 * time.Hour), + Through: now, + Refs: inputChunkRefs, + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: "does not match"}, + }, + } + ctx := user.InjectOrgID(context.Background(), tenantID) + res, err := gw.FilterChunkRefs(ctx, req) + require.NoError(t, err) + + expectedResponse := &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{}, + } + require.Equal(t, expectedResponse, res) + }) + + t.Run("match - return filtered", func(t *testing.T) { + inputChunkRefs := groupRefs(t, chunkRefs) + // hack to get indexed key for a specific series + // the indexed key range for a series is defined as + // i * keysPerSeries ... 
i * keysPerSeries + keysPerSeries - 1 + // where i is the nth series in a block + // fortunately, i is also used as Checksum for the single chunk of a series + // see mkBasicSeriesWithBlooms() in pkg/storage/bloom/v1/test_util.go + key := inputChunkRefs[0].Refs[0].Checksum*1000 + 500 + + req := &logproto.FilterChunkRefRequest{ + From: now.Add(-8 * time.Hour), + Through: now, + Refs: inputChunkRefs, + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: fmt.Sprint(key)}, + }, + } + ctx := user.InjectOrgID(context.Background(), tenantID) + res, err := gw.FilterChunkRefs(ctx, req) + require.NoError(t, err) + + expectedResponse := &logproto.FilterChunkRefResponse{ + ChunkRefs: inputChunkRefs[:1], + } + require.Equal(t, expectedResponse, res) + }) + + }) +} + +func createBlockQueriers(t *testing.T, numBlocks int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockQuerierWithFingerprintRange, [][]v1.SeriesWithBloom) { + t.Helper() + step := (maxFp - minFp) / model.Fingerprint(numBlocks) + bqs := make([]bloomshipper.BlockQuerierWithFingerprintRange, 0, numBlocks) + series := make([][]v1.SeriesWithBloom, 0, numBlocks) + for i := 0; i < numBlocks; i++ { + fromFp := minFp + (step * model.Fingerprint(i)) + throughFp := fromFp + step - 1 + // last block needs to include maxFp + if i == numBlocks-1 { + throughFp = maxFp + } + blockQuerier, data := v1.MakeBlockQuerier(t, fromFp, throughFp, from, through) + bq := bloomshipper.BlockQuerierWithFingerprintRange{ + BlockQuerier: blockQuerier, + MinFp: fromFp, + MaxFp: throughFp, + } + bqs = append(bqs, bq) + series = append(series, data) + } + return bqs, series } -type fakeLimits struct { +func newMockBloomStore(bqs []bloomshipper.BlockQuerierWithFingerprintRange) *mockBloomStore { + return &mockBloomStore{bqs: bqs} } -func (f fakeLimits) BloomGatewayShardSize(_ string) int { - //TODO implement me - panic("implement me") +type mockBloomStore struct { + bqs 
[]bloomshipper.BlockQuerierWithFingerprintRange } -func (f fakeLimits) BloomGatewayEnabled(_ string) bool { - //TODO implement me - panic("implement me") +// GetBlockQueriersForBlockRefs implements bloomshipper.Store. +func (s *mockBloomStore) GetBlockQueriersForBlockRefs(_ context.Context, _ string, _ []bloomshipper.BlockRef) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) { + return s.bqs, nil } -func (f fakeLimits) BloomGatewayBlocksDownloadingParallelism(_ string) int { - //TODO implement me - panic("implement me") +// GetBlockRefs implements bloomshipper.Store. +func (s *mockBloomStore) GetBlockRefs(_ context.Context, tenant string, _, _ time.Time) ([]bloomshipper.BlockRef, error) { + blocks := make([]bloomshipper.BlockRef, 0, len(s.bqs)) + for i := range s.bqs { + blocks = append(blocks, bloomshipper.BlockRef{ + Ref: bloomshipper.Ref{ + MinFingerprint: uint64(s.bqs[i].MinFp), + MaxFingerprint: uint64(s.bqs[i].MaxFp), + TenantID: tenant, + }, + }) + } + return blocks, nil +} + +// GetBlockQueriers implements bloomshipper.Store. 
+func (s *mockBloomStore) GetBlockQueriers(_ context.Context, _ string, _, _ time.Time, _ []uint64) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) { + return s.bqs, nil +} + +func (s *mockBloomStore) Stop() {} + +func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBloom, nthSeries int) []*logproto.ChunkRef { + t.Helper() + n := 0 + res := make([]*logproto.ChunkRef, 0) + for i := range data { + for j := range data[i] { + if n%nthSeries == 0 { + chk := data[i][j].Series.Chunks[0] + res = append(res, &logproto.ChunkRef{ + Fingerprint: uint64(data[i][j].Series.Fingerprint), + UserID: tenant, + From: chk.Start, + Through: chk.End, + Checksum: chk.Checksum, + }) + } + n++ + } + } + return res } diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go new file mode 100644 index 0000000000000..17063a4903d23 --- /dev/null +++ b/pkg/bloomgateway/multiplexing.go @@ -0,0 +1,221 @@ +package bloomgateway + +import ( + "sort" + "time" + + "github.com/oklog/ulid" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/logproto" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" +) + +const ( + Day = 24 * time.Hour +) + +// Task is the data structure that is enqueued to the internal queue and dequeued by query workers +type Task struct { + // ID is a lexcographically sortable unique identifier of the task + ID ulid.ULID + // Tenant is the tenant ID + Tenant string + // Request is the original request + Request *logproto.FilterChunkRefRequest + // ErrCh is a send-only channel to write an error to + ErrCh chan<- error + // ResCh is a send-only channel to write partial responses to + ResCh chan<- v1.Output +} + +// NewTask returns a new Task that can be enqueued to the task queue. +// In addition, it returns a result and an error channel, as well +// as an error if the instantiation fails. 
+func NewTask(tenantID string, req *logproto.FilterChunkRefRequest) (Task, chan v1.Output, chan error, error) { + key, err := ulid.New(ulid.Now(), nil) + if err != nil { + return Task{}, nil, nil, err + } + errCh := make(chan error, 1) + resCh := make(chan v1.Output, 1) + task := Task{ + ID: key, + Tenant: tenantID, + Request: req, + ErrCh: errCh, + ResCh: resCh, + } + return task, resCh, errCh, nil +} + +// Copy returns a copy of the existing task but with a new slice of chunks +func (t Task) Copy(refs []*logproto.GroupedChunkRefs) Task { + return Task{ + ID: t.ID, + Tenant: t.Tenant, + Request: &logproto.FilterChunkRefRequest{ + From: t.Request.From, + Through: t.Request.Through, + Filters: t.Request.Filters, + Refs: refs, + }, + ErrCh: t.ErrCh, + ResCh: t.ResCh, + } +} + +// Bounds returns the day boundaries of the task +func (t Task) Bounds() (time.Time, time.Time) { + return getDayTime(t.Request.From), getDayTime(t.Request.Through) +} + +func (t Task) ChunkIterForDay(day time.Time) v1.Iterator[*logproto.GroupedChunkRefs] { + cf := filterGroupedChunkRefsByDay{day: day} + return &FilterIter[*logproto.GroupedChunkRefs]{ + iter: v1.NewSliceIter(t.Request.Refs), + matches: cf.contains, + transform: cf.filter, + } +} + +type filterGroupedChunkRefsByDay struct { + day time.Time +} + +func (cf filterGroupedChunkRefsByDay) contains(a *logproto.GroupedChunkRefs) bool { + from, through := getFromThrough(a.Refs) + if from.Time().After(cf.day.Add(Day)) || through.Time().Before(cf.day) { + return false + } + return true +} + +func (cf filterGroupedChunkRefsByDay) filter(a *logproto.GroupedChunkRefs) *logproto.GroupedChunkRefs { + minTs, maxTs := getFromThrough(a.Refs) + + // in most cases, all chunks are within day range + if minTs.Time().Compare(cf.day) >= 0 && maxTs.Time().Before(cf.day.Add(Day)) { + return a + } + + // case where certain chunks are outside of day range + // using binary search to get min and max index of chunks that fall into the day range + min := 
sort.Search(len(a.Refs), func(i int) bool { + start := a.Refs[i].From.Time() + end := a.Refs[i].Through.Time() + return start.Compare(cf.day) >= 0 || end.Compare(cf.day) >= 0 + }) + + max := sort.Search(len(a.Refs), func(i int) bool { + start := a.Refs[i].From.Time() + return start.Compare(cf.day.Add(Day)) > 0 + }) + + return &logproto.GroupedChunkRefs{ + Tenant: a.Tenant, + Fingerprint: a.Fingerprint, + Refs: a.Refs[min:max], + } +} + +type Predicate[T any] func(a T) bool +type Transform[T any] func(a T) T + +type FilterIter[T any] struct { + iter v1.Iterator[T] + matches Predicate[T] + transform Transform[T] + cache T + zero T // zero value of the return type of Next() +} + +func (it *FilterIter[T]) Next() bool { + next := it.iter.Next() + if !next { + it.cache = it.zero + return false + } + for next && !it.matches(it.iter.At()) { + next = it.iter.Next() + if !next { + it.cache = it.zero + return false + } + } + it.cache = it.transform(it.iter.At()) + return true +} + +func (it *FilterIter[T]) At() T { + return it.cache +} + +func (it *FilterIter[T]) Err() error { + return nil +} + +// FilterRequest extends v1.Request with an error channel +type FilterRequest struct { + v1.Request + Error chan<- error +} + +// taskMergeIterator implements v1.Iterator +type taskMergeIterator struct { + curr FilterRequest + heap *v1.HeapIterator[IndexedValue[*logproto.GroupedChunkRefs]] + tasks []Task + day time.Time + err error +} + +func newTaskMergeIterator(day time.Time, tasks ...Task) v1.PeekingIterator[v1.Request] { + it := &taskMergeIterator{ + tasks: tasks, + curr: FilterRequest{}, + day: day, + } + it.init() + return v1.NewPeekingIter[v1.Request](it) +} + +func (it *taskMergeIterator) init() { + sequences := make([]v1.PeekingIterator[IndexedValue[*logproto.GroupedChunkRefs]], 0, len(it.tasks)) + for i := range it.tasks { + iter := NewIterWithIndex(it.tasks[i].ChunkIterForDay(it.day), i) + sequences = append(sequences, v1.NewPeekingIter(iter)) + } + it.heap = 
v1.NewHeapIterator( + func(i, j IndexedValue[*logproto.GroupedChunkRefs]) bool { + return i.val.Fingerprint < j.val.Fingerprint + }, + sequences..., + ) + it.err = nil +} + +func (it *taskMergeIterator) Next() bool { + ok := it.heap.Next() + if !ok { + return false + } + + group := it.heap.At() + task := it.tasks[group.idx] + + it.curr.Fp = model.Fingerprint(group.val.Fingerprint) + it.curr.Chks = convertToChunkRefs(group.val.Refs) + it.curr.Searches = convertToSearches(task.Request.Filters) + it.curr.Response = task.ResCh + it.curr.Error = task.ErrCh + return true +} + +func (it *taskMergeIterator) At() v1.Request { + return it.curr.Request +} + +func (it *taskMergeIterator) Err() error { + return it.err +} diff --git a/pkg/bloomgateway/multiplexing_test.go b/pkg/bloomgateway/multiplexing_test.go new file mode 100644 index 0000000000000..93e5e5686fdaf --- /dev/null +++ b/pkg/bloomgateway/multiplexing_test.go @@ -0,0 +1,203 @@ +package bloomgateway + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logproto" +) + +func TestTask(t *testing.T) { + t.Run("bounds returns request boundaries", func(t *testing.T) { + ts := model.Now() + req := &logproto.FilterChunkRefRequest{ + From: ts.Add(-1 * time.Hour), + Through: ts, + } + task, _, _, err := NewTask("tenant", req) + require.NoError(t, err) + from, through := task.Bounds() + require.Equal(t, getDayTime(req.From), from) + require.Equal(t, getDayTime(req.Through), through) + }) +} + +func TestTaskMergeIterator(t *testing.T) { + // Thu Nov 09 2023 10:56:50 UTC + ts := model.TimeFromUnix(1699523810) + day := getDayTime(ts) + tenant := "fake" + + t.Run("empty requests result in empty iterator", func(t *testing.T) { + r1 := &logproto.FilterChunkRefRequest{ + From: ts.Add(-3 * time.Hour), + Through: ts.Add(-2 * time.Hour), + Refs: []*logproto.GroupedChunkRefs{}, + } + t1, _, _, err := NewTask(tenant, r1) + require.NoError(t, err) 
+ + r2 := &logproto.FilterChunkRefRequest{ + From: ts.Add(-1 * time.Hour), + Through: ts, + Refs: []*logproto.GroupedChunkRefs{}, + } + t2, _, _, err := NewTask(tenant, r2) + require.NoError(t, err) + + r3 := &logproto.FilterChunkRefRequest{ + From: ts.Add(-1 * time.Hour), + Through: ts, + Refs: []*logproto.GroupedChunkRefs{}, + } + t3, _, _, err := NewTask(tenant, r3) + require.NoError(t, err) + + it := newTaskMergeIterator(day, t1, t2, t3) + // nothing to iterate over + require.False(t, it.Next()) + }) + + t.Run("merge multiple tasks in ascending fingerprint order", func(t *testing.T) { + r1 := &logproto.FilterChunkRefRequest{ + From: ts.Add(-3 * time.Hour), + Through: ts.Add(-2 * time.Hour), + Refs: []*logproto.GroupedChunkRefs{ + {Fingerprint: 100, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-3 * time.Hour), Through: ts.Add(-2 * time.Hour), Checksum: 100}, + }}, + }, + } + t1, _, _, err := NewTask(tenant, r1) + require.NoError(t, err) + + r2 := &logproto.FilterChunkRefRequest{ + From: ts.Add(-1 * time.Hour), + Through: ts, + Refs: []*logproto.GroupedChunkRefs{ + {Fingerprint: 100, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 200}, + }}, + {Fingerprint: 200, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 300}, + }}, + }, + } + t2, _, _, err := NewTask(tenant, r2) + require.NoError(t, err) + + r3 := &logproto.FilterChunkRefRequest{ + From: ts.Add(-1 * time.Hour), + Through: ts, + Refs: []*logproto.GroupedChunkRefs{ + {Fingerprint: 200, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 400}, + }}, + }, + } + t3, _, _, err := NewTask(tenant, r3) + require.NoError(t, err) + + it := newTaskMergeIterator(day, t1, t2, t3) + + // first item + require.True(t, it.Next()) + r := it.At() + require.Equal(t, model.Fingerprint(100), r.Fp) + require.Equal(t, uint32(100), r.Chks[0].Checksum) + + // second 
item + require.True(t, it.Next()) + r = it.At() + require.Equal(t, model.Fingerprint(100), r.Fp) + require.Equal(t, uint32(200), r.Chks[0].Checksum) + + // third item + require.True(t, it.Next()) + r = it.At() + require.Equal(t, model.Fingerprint(200), r.Fp) + require.Equal(t, uint32(300), r.Chks[0].Checksum) + + // fourth item + require.True(t, it.Next()) + r = it.At() + require.Equal(t, model.Fingerprint(200), r.Fp) + require.Equal(t, uint32(400), r.Chks[0].Checksum) + + // no more items + require.False(t, it.Next()) + }) +} + +func TestChunkIterForDay(t *testing.T) { + tenant := "fake" + + // Thu Nov 09 2023 10:56:50 UTC + ts := model.TimeFromUnix(1699523810) + + t.Run("filter chunk refs that fall into the day range", func(t *testing.T) { + input := &logproto.FilterChunkRefRequest{ + From: ts.Add(-168 * time.Hour), // 1w ago + Through: ts, + Refs: []*logproto.GroupedChunkRefs{ + {Fingerprint: 100, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-168 * time.Hour), Through: ts.Add(-167 * time.Hour), Checksum: 100}, + {From: ts.Add(-143 * time.Hour), Through: ts.Add(-142 * time.Hour), Checksum: 101}, + }}, + {Fingerprint: 200, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-144 * time.Hour), Through: ts.Add(-143 * time.Hour), Checksum: 200}, + {From: ts.Add(-119 * time.Hour), Through: ts.Add(-118 * time.Hour), Checksum: 201}, + }}, + {Fingerprint: 300, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-120 * time.Hour), Through: ts.Add(-119 * time.Hour), Checksum: 300}, + {From: ts.Add(-95 * time.Hour), Through: ts.Add(-94 * time.Hour), Checksum: 301}, + }}, + {Fingerprint: 400, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-96 * time.Hour), Through: ts.Add(-95 * time.Hour), Checksum: 400}, + {From: ts.Add(-71 * time.Hour), Through: ts.Add(-70 * time.Hour), Checksum: 401}, + }}, + {Fingerprint: 500, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-72 * time.Hour), Through: ts.Add(-71 * time.Hour), 
Checksum: 500}, + {From: ts.Add(-47 * time.Hour), Through: ts.Add(-46 * time.Hour), Checksum: 501}, + }}, + {Fingerprint: 600, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-48 * time.Hour), Through: ts.Add(-47 * time.Hour), Checksum: 600}, + {From: ts.Add(-23 * time.Hour), Through: ts.Add(-22 * time.Hour), Checksum: 601}, + }}, + {Fingerprint: 700, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-24 * time.Hour), Through: ts.Add(-23 * time.Hour), Checksum: 700}, + {From: ts.Add(-1 * time.Hour), Through: ts, Checksum: 701}, + }}, + }, + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: "foo"}, + {Operator: 1, Match: "bar"}, + }, + } + + // day ranges from ts-48h to ts-24h + day := getDayTime(ts.Add(-36 * time.Hour)) + + expected := []*logproto.GroupedChunkRefs{ + {Fingerprint: 500, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-47 * time.Hour), Through: ts.Add(-46 * time.Hour), Checksum: 501}, + }}, + {Fingerprint: 600, Tenant: tenant, Refs: []*logproto.ShortRef{ + {From: ts.Add(-48 * time.Hour), Through: ts.Add(-47 * time.Hour), Checksum: 600}, + }}, + } + + task, _, _, _ := NewTask(tenant, input) + it := task.ChunkIterForDay(day) + + output := make([]*logproto.GroupedChunkRefs, 0, len(input.Refs)) + for it.Next() { + output = append(output, it.At()) + } + + require.Equal(t, expected, output) + }) +} diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go new file mode 100644 index 0000000000000..87187e071b82d --- /dev/null +++ b/pkg/bloomgateway/util.go @@ -0,0 +1,167 @@ +package bloomgateway + +import ( + "sort" + "time" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/logproto" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" +) + +type IndexedValue[T any] struct { + idx int + val T +} + +type IterWithIndex[T any] struct { + v1.Iterator[T] + zero T // zero value of T + cache IndexedValue[T] +} + +func 
(it *IterWithIndex[T]) At() IndexedValue[T] { + it.cache.val = it.Iterator.At() + return it.cache +} + +func NewIterWithIndex[T any](iter v1.Iterator[T], idx int) v1.Iterator[IndexedValue[T]] { + return &IterWithIndex[T]{ + Iterator: iter, + cache: IndexedValue[T]{idx: idx}, + } +} + +type SliceIterWithIndex[T any] struct { + xs []T // source slice + pos int // position within the slice + zero T // zero value of T + cache IndexedValue[T] +} + +func (it *SliceIterWithIndex[T]) Next() bool { + it.pos++ + return it.pos < len(it.xs) +} + +func (it *SliceIterWithIndex[T]) Err() error { + return nil +} + +func (it *SliceIterWithIndex[T]) At() IndexedValue[T] { + it.cache.val = it.xs[it.pos] + return it.cache +} + +func (it *SliceIterWithIndex[T]) Peek() (IndexedValue[T], bool) { + if it.pos+1 >= len(it.xs) { + it.cache.val = it.zero + return it.cache, false + } + it.cache.val = it.xs[it.pos+1] + return it.cache, true +} + +func NewSliceIterWithIndex[T any](xs []T, idx int) v1.PeekingIterator[IndexedValue[T]] { + return &SliceIterWithIndex[T]{ + xs: xs, + pos: -1, + cache: IndexedValue[T]{idx: idx}, + } +} + +func getDayTime(ts model.Time) time.Time { + return time.Date(ts.Time().Year(), ts.Time().Month(), ts.Time().Day(), 0, 0, 0, 0, time.UTC) +} + +// TODO(chaudum): Fix Through time calculation +// getFromThrough assumes a list of ShortRefs sorted by From time +// However, it does also assume that the last item has the highest +// Through time, which might not be the case! +func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) { + if len(refs) == 0 { + return model.Earliest, model.Latest + } + return refs[0].From, refs[len(refs)-1].Through +} + +// convertToSearches converts a list of line filter expressions to a list of +// byte slices that can be used with the bloom filters. +// TODO(chaudum): Currently this function only supports equality matchers, +// but we eventually also want to support regex matchers. 
+func convertToSearches(filters []*logproto.LineFilterExpression) [][]byte { + searches := make([][]byte, 0, len(filters)) + for _, f := range filters { + searches = append(searches, []byte(f.Match)) + } + return searches +} + +// convertToShortRefs converts a v1.ChunkRefs into []*logproto.ShortRef +// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request. +func convertToShortRefs(refs v1.ChunkRefs) []*logproto.ShortRef { + result := make([]*logproto.ShortRef, 0, len(refs)) + for _, ref := range refs { + result = append(result, &logproto.ShortRef{From: ref.Start, Through: ref.End, Checksum: ref.Checksum}) + } + return result +} + +// convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs +// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request. +func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs { + result := make(v1.ChunkRefs, 0, len(refs)) + for _, ref := range refs { + result = append(result, v1.ChunkRef{Start: ref.From, End: ref.Through, Checksum: ref.Checksum}) + } + return result +} + +// getFirstLast returns the first and last item of a fingerprint slice +// It assumes an ascending sorted list of fingerprints. 
+func getFirstLast[T any](s []T) (T, T) { + var zero T + if len(s) == 0 { + return zero, zero + } + return s[0], s[len(s)-1] +} + +type boundedTasks struct { + blockRef bloomshipper.BlockRef + tasks []Task +} + +func partitionFingerprintRange(tasks []Task, blocks []bloomshipper.BlockRef) (result []boundedTasks) { + for _, block := range blocks { + bounded := boundedTasks{ + blockRef: block, + } + + for _, task := range tasks { + refs := task.Request.Refs + min := sort.Search(len(refs), func(i int) bool { + return block.Cmp(refs[i].Fingerprint) > v1.Before + }) + + max := sort.Search(len(refs), func(i int) bool { + return block.Cmp(refs[i].Fingerprint) == v1.After + }) + + // All fingerprints fall outside of the consumer's range + if min == len(refs) || max == 0 { + continue + } + + bounded.tasks = append(bounded.tasks, task.Copy(refs[min:max])) + } + + if len(bounded.tasks) > 0 { + result = append(result, bounded) + } + + } + return result +} diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go new file mode 100644 index 0000000000000..1424c56a19153 --- /dev/null +++ b/pkg/bloomgateway/util_test.go @@ -0,0 +1,84 @@ +package bloomgateway + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" +) + +func TestSliceIterWithIndex(t *testing.T) { + t.Run("SliceIterWithIndex implements v1.PeekingIterator interface", func(t *testing.T) { + xs := []string{"a", "b", "c"} + it := NewSliceIterWithIndex(xs, 123) + + // peek at first item + p, ok := it.Peek() + require.True(t, ok) + require.Equal(t, "a", p.val) + require.Equal(t, 123, p.idx) + + // proceed to first item + require.True(t, it.Next()) + require.Equal(t, "a", it.At().val) + require.Equal(t, 123, it.At().idx) + + // proceed to second and third item + require.True(t, it.Next()) + require.True(t, it.Next()) + + // peek at non-existing fourth item + p, ok = it.Peek() + 
require.False(t, ok) + require.Equal(t, "", p.val) // "" is zero value for type string + require.Equal(t, 123, p.idx) + }) +} + +func mkBlockRef(minFp, maxFp uint64) bloomshipper.BlockRef { + return bloomshipper.BlockRef{ + Ref: bloomshipper.Ref{ + MinFingerprint: minFp, + MaxFingerprint: maxFp, + }, + } +} + +func TestPartitionFingerprintRange(t *testing.T) { + seriesPerBound := 100 + bounds := []bloomshipper.BlockRef{ + mkBlockRef(0, 99), + mkBlockRef(100, 199), + mkBlockRef(200, 299), + mkBlockRef(300, 399), // one out of bounds block + } + + nTasks := 4 + nSeries := 300 + tasks := make([]Task, nTasks) + for i := 0; i < nSeries; i++ { + if tasks[i%4].Request == nil { + tasks[i%4].Request = &logproto.FilterChunkRefRequest{} + } + tasks[i%4].Request.Refs = append(tasks[i%nTasks].Request.Refs, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)}) + } + + results := partitionFingerprintRange(tasks, bounds) + require.Equal(t, 3, len(results)) // ensure we only return bounds in range + for _, res := range results { + // ensure we have the right number of tasks per bound + for i := 0; i < nTasks; i++ { + require.Equal(t, seriesPerBound/nTasks, len(res.tasks[i].Request.Refs)) + } + } + + // ensure bound membership + for i := 0; i < nSeries; i++ { + require.Equal(t, + &logproto.GroupedChunkRefs{Fingerprint: uint64(i)}, + results[i/seriesPerBound].tasks[i%nTasks].Request.Refs[i%seriesPerBound/nTasks], + ) + } +} diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go new file mode 100644 index 0000000000000..f39632b1219ff --- /dev/null +++ b/pkg/bloomgateway/worker.go @@ -0,0 +1,227 @@ +package bloomgateway + +import ( + "context" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/services" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/queue" + v1 
"github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" +) + +type workerConfig struct { + maxWaitTime time.Duration + maxItems int +} + +type workerMetrics struct { + dequeuedTasks *prometheus.CounterVec + dequeueErrors *prometheus.CounterVec + dequeueWaitTime *prometheus.SummaryVec + storeAccessLatency *prometheus.HistogramVec +} + +func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem string) *workerMetrics { + labels := []string{"worker"} + return &workerMetrics{ + dequeuedTasks: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dequeued_tasks_total", + Help: "Total amount of tasks that the worker dequeued from the bloom query queue", + }, labels), + dequeueErrors: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dequeue_errors_total", + Help: "Total amount of failed dequeue operations", + }, labels), + dequeueWaitTime: promauto.With(registerer).NewSummaryVec(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dequeue_wait_time", + Help: "Time spent waiting for dequeuing tasks from queue", + }, labels), + storeAccessLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "store_latency", + Help: "Latency in seconds of accessing the bloom store component", + }, append(labels, "operation")), + } +} + +// worker is a data structure that consumes tasks from the request queue, +// processes them and returns the result/error back to the response channels of +// the tasks. +// It is responsible for multiplexing tasks so they can be processed in a more +// efficient way. 
+type worker struct { + services.Service + + id string + cfg workerConfig + queue *queue.RequestQueue + store bloomshipper.Store + tasks *pendingTasks + logger log.Logger + metrics *workerMetrics +} + +func newWorker(id string, cfg workerConfig, queue *queue.RequestQueue, store bloomshipper.Store, tasks *pendingTasks, logger log.Logger, metrics *workerMetrics) *worker { + w := &worker{ + id: id, + cfg: cfg, + queue: queue, + store: store, + tasks: tasks, + logger: log.With(logger, "worker", id), + metrics: metrics, + } + w.Service = services.NewBasicService(w.starting, w.running, w.stopping).WithName(id) + return w +} + +func (w *worker) starting(_ context.Context) error { + level.Debug(w.logger).Log("msg", "starting worker") + w.queue.RegisterConsumerConnection(w.id) + return nil +} + +func (w *worker) running(ctx context.Context) error { + idx := queue.StartIndexWithLocalQueue + + for { + select { + + case <-ctx.Done(): + return ctx.Err() + + default: + taskCtx := context.Background() + dequeueStart := time.Now() + items, newIdx, err := w.queue.DequeueMany(taskCtx, idx, w.id, w.cfg.maxItems, w.cfg.maxWaitTime) + w.metrics.dequeueWaitTime.WithLabelValues(w.id).Observe(time.Since(dequeueStart).Seconds()) + if err != nil { + // We only return an error if the queue is stopped and dequeuing did not yield any items + if err == queue.ErrStopped && len(items) == 0 { + return err + } + w.metrics.dequeueErrors.WithLabelValues(w.id).Inc() + level.Error(w.logger).Log("msg", "failed to dequeue tasks", "err", err, "items", len(items)) + } + idx = newIdx + + if len(items) == 0 { + w.queue.ReleaseRequests(items) + continue + } + w.metrics.dequeuedTasks.WithLabelValues(w.id).Add(float64(len(items))) + + tasksPerDay := make(map[time.Time][]Task) + + for _, item := range items { + task, ok := item.(Task) + if !ok { + // This really should never happen, because only the bloom gateway itself can enqueue tasks. 
+ w.queue.ReleaseRequests(items) + return errors.Errorf("failed to cast dequeued item to Task: %v", item) + } + level.Debug(w.logger).Log("msg", "dequeued task", "task", task.ID) + w.tasks.Delete(task.ID) + + fromDay, throughDay := task.Bounds() + + if fromDay.Equal(throughDay) { + tasksPerDay[fromDay] = append(tasksPerDay[fromDay], task) + } else { + for i := fromDay; i.Before(throughDay); i = i.Add(24 * time.Hour) { + tasksPerDay[i] = append(tasksPerDay[i], task) + } + } + } + + for day, tasks := range tasksPerDay { + logger := log.With(w.logger, "day", day) + level.Debug(logger).Log("msg", "process tasks", "tasks", len(tasks)) + + storeFetchStart := time.Now() + blockRefs, err := w.store.GetBlockRefs(taskCtx, tasks[0].Tenant, day, day.Add(Day).Add(-1*time.Nanosecond)) + w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockRefs").Observe(time.Since(storeFetchStart).Seconds()) + if err != nil { + for _, t := range tasks { + t.ErrCh <- err + } + // continue with tasks of next day + continue + } + // No blocks found. + // Since there are no blocks for the given tasks, we need to return the + // unfiltered list of chunk refs. + if len(blockRefs) == 0 { + level.Warn(logger).Log("msg", "no blocks found") + for _, t := range tasks { + for _, ref := range t.Request.Refs { + t.ResCh <- v1.Output{ + Fp: model.Fingerprint(ref.Fingerprint), + Removals: nil, + } + } + } + // continue with tasks of next day + continue + } + + boundedRefs := partitionFingerprintRange(tasks, blockRefs) + blockRefs = blockRefs[:0] + for _, b := range boundedRefs { + blockRefs = append(blockRefs, b.blockRef) + } + + // GetBlockQueriersForBlockRefs() waits until all blocks are downloaded and available for querying. + // TODO(chaudum): Add API that allows to process blocks as soon as they become available. 
+ // This will require to change the taskMergeIterator to a slice of requests so we can seek + // to the appropriate fingerprint range within the slice that matches the block's fingerprint range. + storeFetchStart = time.Now() + blockQueriers, err := w.store.GetBlockQueriersForBlockRefs(taskCtx, tasks[0].Tenant, blockRefs) + w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockQueriersForBlockRefs").Observe(time.Since(storeFetchStart).Seconds()) + if err != nil { + for _, t := range tasks { + t.ErrCh <- err + } + // continue with tasks of next day + continue + } + + for i, blockQuerier := range blockQueriers { + it := newTaskMergeIterator(day, boundedRefs[i].tasks...) + fq := blockQuerier.Fuse([]v1.PeekingIterator[v1.Request]{it}) + err := fq.Run() + if err != nil { + for _, t := range boundedRefs[i].tasks { + t.ErrCh <- errors.Wrap(err, "failed to run chunk check") + } + } + } + } + + // return dequeued items back to the pool + w.queue.ReleaseRequests(items) + + } + } +} + +func (w *worker) stopping(err error) error { + level.Debug(w.logger).Log("msg", "stopping worker", "err", err) + w.queue.UnregisterConsumerConnection(w.id) + return nil +} diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index 055b7b5c92717..a7bebfbfccf14 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -20,7 +20,6 @@ import ( "github.com/grafana/loki/pkg/querier/queryrange" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/util" - lokiutil "github.com/grafana/loki/pkg/util" ) type Config struct { @@ -151,7 +150,7 @@ func newQuerierWorkerWithProcessor(cfg Config, metrics *Metrics, logger log.Logg } if ring != nil { - w, err := lokiutil.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, cfg.DNSLookupPeriod, f) + w, err := util.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, cfg.DNSLookupPeriod, f) if err != nil { return nil, err } 
diff --git a/pkg/queue/queue.go b/pkg/queue/queue.go index fa1860e4e88d3..f0475164bd4d1 100644 --- a/pkg/queue/queue.go +++ b/pkg/queue/queue.go @@ -59,6 +59,7 @@ type RequestQueue struct { stopped bool metrics *Metrics + pool *SlicePool[Request] } func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, metrics *Metrics) *RequestQueue { @@ -66,6 +67,7 @@ func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, met queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay), connectedConsumers: atomic.NewInt32(0), metrics: metrics, + pool: NewSlicePool[Request](1<<6, 1<<10, 2), // Buckets are [64, 128, 256, 512, 1024]. } q.cond = contextCond{Cond: sync.NewCond(&q.mtx)} @@ -125,6 +127,41 @@ func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQue } } +// ReleaseRequests returns items back to the slice pool. +// Must only be called in combination with DequeueMany(). +func (q *RequestQueue) ReleaseRequests(items []Request) { + q.pool.Put(items) +} + +// DequeueMany consumes multiple items for a single tenant from the queue. +// It returns at most maxItems items and waits at most maxWait for them if fewer requests for this tenant are enqueued. +// The caller is responsible for returning the dequeued requests back to the +// pool by calling ReleaseRequests(items). 
+func (q *RequestQueue) DequeueMany(ctx context.Context, last QueueIndex, consumerID string, maxItems int, maxWait time.Duration) ([]Request, QueueIndex, error) { + // create a context for dequeuing with a max time we want to wait to fulfill the desired maxItems + + dequeueCtx, cancel := context.WithTimeout(ctx, maxWait) + defer cancel() + + var idx QueueIndex + + items := q.pool.Get(maxItems) + for { + item, newIdx, err := q.Dequeue(dequeueCtx, last, consumerID) + if err != nil { + if err == context.DeadlineExceeded { + err = nil + } + return items, idx, err + } + items = append(items, item) + idx = newIdx + if len(items) == maxItems { + return items, idx, nil + } + } +} + // Dequeue find next tenant queue and takes the next request off of it. Will block if there are no requests. // By passing tenant index from previous call of this method, querier guarantees that it iterates over all tenants fairly. // If consumer finds that request from the tenant is already expired, it can get a request for the same tenant by using UserIndex.ReuseLastUser. diff --git a/pkg/queue/util.go b/pkg/queue/util.go new file mode 100644 index 0000000000000..9b7fced6dfbf7 --- /dev/null +++ b/pkg/queue/util.go @@ -0,0 +1,25 @@ +package queue + +import "github.com/prometheus/prometheus/util/pool" + +// SlicePool uses a bucket pool and wraps the Get() and Put() functions for +// simpler access. 
+type SlicePool[T any] struct { + p *pool.Pool +} + +func NewSlicePool[T any](minSize, maxSize int, factor float64) *SlicePool[T] { + return &SlicePool[T]{ + p: pool.New(minSize, maxSize, factor, func(i int) interface{} { + return make([]T, 0, i) + }), + } +} + +func (sp *SlicePool[T]) Get(n int) []T { + return sp.p.Get(n).([]T) +} + +func (sp *SlicePool[T]) Put(buf []T) { + sp.p.Put(buf[0:0]) +} diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go index 0ea6f6451ebac..e7278b971f6c8 100644 --- a/pkg/storage/bloom/v1/builder_test.go +++ b/pkg/storage/bloom/v1/builder_test.go @@ -3,51 +3,13 @@ package v1 import ( "bytes" "errors" - "fmt" "testing" - "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/chunkenc" - "github.com/grafana/loki/pkg/storage/bloom/v1/filter" ) -func mkBasicSeriesWithBlooms(nSeries, keysPerSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBloom, keysList [][][]byte) { - seriesList = make([]SeriesWithBloom, 0, nSeries) - keysList = make([][][]byte, 0, nSeries) - for i := 0; i < nSeries; i++ { - var series Series - step := (throughFp - fromFp) / (model.Fingerprint(nSeries)) - series.Fingerprint = fromFp + model.Fingerprint(i)*step - timeDelta := fromTs + (throughTs-fromTs)/model.Time(nSeries)*model.Time(i) - series.Chunks = []ChunkRef{ - { - Start: fromTs + timeDelta*model.Time(i), - End: fromTs + timeDelta*model.Time(i), - Checksum: uint32(i), - }, - } - - var bloom Bloom - bloom.ScalableBloomFilter = *filter.NewScalableBloomFilter(1024, 0.01, 0.8) - - keys := make([][]byte, 0, keysPerSeries) - for j := 0; j < keysPerSeries; j++ { - key := []byte(fmt.Sprint(j)) - bloom.Add(key) - keys = append(keys, key) - } - - seriesList = append(seriesList, SeriesWithBloom{ - Series: &series, - Bloom: &bloom, - }) - keysList = append(keysList, keys) - } - return -} - func EqualIterators[T any](t *testing.T, test func(a, 
b T), expected, actual Iterator[T]) { for expected.Next() { require.True(t, actual.Next()) diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go index 021fba1e81856..c397a7a55fd57 100644 --- a/pkg/storage/bloom/v1/fuse.go +++ b/pkg/storage/bloom/v1/fuse.go @@ -1,56 +1,53 @@ package v1 import ( - "sort" - "github.com/efficientgo/core/errors" "github.com/prometheus/common/model" ) -type request struct { - fp model.Fingerprint - chks ChunkRefs - searches [][]byte - response chan output +type Request struct { + Fp model.Fingerprint + Chks ChunkRefs + Searches [][]byte + Response chan<- Output } -// output represents a chunk that was present in the bloom -// but failed to pass the search filters and can be removed from -// the list of chunks to download -type output struct { - fp model.Fingerprint - removals ChunkRefs +// Output represents a chunk that failed to pass all searches +// and must be downloaded +type Output struct { + Fp model.Fingerprint + Removals ChunkRefs } // Fuse combines multiple requests into a single loop iteration // over the data set and returns the corresponding outputs // TODO(owen-d): better async control -func (bq *BlockQuerier) Fuse(inputs []PeekingIterator[request]) *FusedQuerier { +func (bq *BlockQuerier) Fuse(inputs []PeekingIterator[Request]) *FusedQuerier { return NewFusedQuerier(bq, inputs) } type FusedQuerier struct { bq *BlockQuerier - inputs Iterator[[]request] + inputs Iterator[[]Request] } -func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[request]) *FusedQuerier { - heap := NewHeapIterator[request]( - func(a, b request) bool { - return a.fp < b.fp +func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request]) *FusedQuerier { + heap := NewHeapIterator[Request]( + func(a, b Request) bool { + return a.Fp < b.Fp }, inputs..., ) - merging := NewDedupingIter[request, []request]( - func(a request, b []request) bool { - return a.fp == b[0].fp + merging := NewDedupingIter[Request, []Request]( + 
func(a Request, b []Request) bool { + return a.Fp == b[0].Fp }, - func(a request) []request { return []request{a} }, - func(a request, b []request) []request { + func(a Request) []Request { return []Request{a} }, + func(a Request, b []Request) []Request { return append(b, a) }, - NewPeekingIter[request](heap), + NewPeekingIter[Request](heap), ) return &FusedQuerier{ bq: bq, @@ -63,7 +60,7 @@ func (fq *FusedQuerier) Run() error { // find all queries for the next relevant fingerprint nextBatch := fq.inputs.At() - fp := nextBatch[0].fp + fp := nextBatch[0].Fp // advance the series iterator to the next fingerprint if err := fq.bq.Seek(fp); err != nil { @@ -79,9 +76,9 @@ func (fq *FusedQuerier) Run() error { if series.Fingerprint != fp { // fingerprint not found, can't remove chunks for _, input := range nextBatch { - input.response <- output{ - fp: fp, - removals: nil, + input.Response <- Output{ + Fp: fp, + Removals: nil, } } } @@ -91,9 +88,9 @@ func (fq *FusedQuerier) Run() error { if !fq.bq.blooms.Next() { // fingerprint not found, can't remove chunks for _, input := range nextBatch { - input.response <- output{ - fp: fp, - removals: nil, + input.Response <- Output{ + Fp: fp, + Removals: nil, } } continue @@ -103,19 +100,17 @@ func (fq *FusedQuerier) Run() error { // test every input against this chunk inputLoop: for _, input := range nextBatch { - _, inBlooms := input.chks.Compare(series.Chunks, true) + _, inBlooms := input.Chks.Compare(series.Chunks, true) // First, see if the search passes the series level bloom before checking for chunks individually - for _, search := range input.searches { + for _, search := range input.Searches { if !bloom.Test(search) { - // the entire series bloom didn't pass one of the searches, - // so we can skip checking chunks individually. 
// We return all the chunks that were the intersection of the query // because they for sure do not match the search and don't // need to be downloaded - input.response <- output{ - fp: fp, - removals: inBlooms, + input.Response <- Output{ + Fp: fp, + Removals: inBlooms, } continue inputLoop } @@ -126,7 +121,7 @@ func (fq *FusedQuerier) Run() error { chunkLoop: for _, chk := range inBlooms { - for _, search := range input.searches { + for _, search := range input.Searches { // TODO(owen-d): meld chunk + search into a single byte slice from the block schema var combined = search @@ -138,9 +133,9 @@ func (fq *FusedQuerier) Run() error { // Otherwise, the chunk passed all the searches } - input.response <- output{ - fp: fp, - removals: removals, + input.Response <- Output{ + Fp: fp, + Removals: removals, } } @@ -148,43 +143,3 @@ func (fq *FusedQuerier) Run() error { return nil } - -// boundedRequests is a set of requests that are clamped to a specific range -type boundedRequests struct { - bounds FingerprintBounds - reqs [][]model.Fingerprint -} - -// reqs models a set of requests covering many fingerprints. 
-// consumers models a set of blocks covering different fingerprint ranges -func partitionFingerprintRange(reqs [][]model.Fingerprint, blocks []FingerprintBounds) (res []boundedRequests) { - for _, block := range blocks { - bounded := boundedRequests{ - bounds: block, - } - - for _, req := range reqs { - min := sort.Search(len(req), func(i int) bool { - return block.Cmp(req[i]) > Before - }) - - max := sort.Search(len(req), func(i int) bool { - return block.Cmp(req[i]) == After - }) - - // All fingerprints fall outside of the consumer's range - if min == len(req) || max == 0 { - continue - } - - bounded.reqs = append(bounded.reqs, req[min:max]) - } - - if len(bounded.reqs) > 0 { - res = append(res, bounded) - } - - } - - return res -} diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go index b990d69f4b7bd..e784ac0168201 100644 --- a/pkg/storage/bloom/v1/fuse_test.go +++ b/pkg/storage/bloom/v1/fuse_test.go @@ -7,44 +7,11 @@ import ( "testing" "github.com/grafana/dskit/concurrency" - "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/chunkenc" ) -func TestPartitionFingerprintRange(t *testing.T) { - seriesPerBound := 100 - bounds := []FingerprintBounds{ - {0, 99}, - {100, 199}, - {200, 299}, - {300, 399}, // one out of bounds block - } - - nReqs := 4 - nSeries := 300 - reqs := make([][]model.Fingerprint, nReqs) - for i := 0; i < nSeries; i++ { - reqs[i%4] = append(reqs[i%nReqs], model.Fingerprint(i)) - } - - results := partitionFingerprintRange(reqs, bounds) - require.Equal(t, 3, len(results)) // ensure we only return bounds in range - for _, res := range results { - // ensure we have the right number of requests per bound - for i := 0; i < nReqs; i++ { - require.Equal(t, seriesPerBound/nReqs, len(res.reqs[i])) - } - } - - // ensure bound membership - for i := 0; i < nSeries; i++ { - require.Equal(t, model.Fingerprint(i), results[i/seriesPerBound].reqs[i%nReqs][i%seriesPerBound/nReqs]) 
- } - -} - func TestFusedQuerier(t *testing.T) { // references for linking in memory reader+writer indexBuf := bytes.NewBuffer(nil) @@ -74,37 +41,39 @@ func TestFusedQuerier(t *testing.T) { querier := NewBlockQuerier(block) nReqs := 10 - var inputs [][]request + var inputs [][]Request + var resChans []chan Output for i := 0; i < nReqs; i++ { - ch := make(chan output) - var reqs []request + ch := make(chan Output) + var reqs []Request // find 2 series for each for j := 0; j < 2; j++ { idx := numSeries/nReqs*i + j - reqs = append(reqs, request{ - fp: data[idx].Series.Fingerprint, - chks: data[idx].Series.Chunks, - response: ch, + reqs = append(reqs, Request{ + Fp: data[idx].Series.Fingerprint, + Chks: data[idx].Series.Chunks, + Response: ch, }) } inputs = append(inputs, reqs) + resChans = append(resChans, ch) } - var itrs []PeekingIterator[request] + var itrs []PeekingIterator[Request] for _, reqs := range inputs { - itrs = append(itrs, NewPeekingIter[request](NewSliceIter[request](reqs))) + itrs = append(itrs, NewPeekingIter[Request](NewSliceIter[Request](reqs))) } - resps := make([][]output, nReqs) + resps := make([][]Output, nReqs) var g sync.WaitGroup g.Add(1) go func() { require.Nil(t, concurrency.ForEachJob( context.Background(), - len(resps), - len(resps), + len(resChans), + len(resChans), func(_ context.Context, i int) error { - for v := range inputs[i][0].response { + for v := range resChans[i] { resps[i] = append(resps[i], v) } return nil @@ -117,7 +86,7 @@ func TestFusedQuerier(t *testing.T) { require.Nil(t, fused.Run()) for _, input := range inputs { - close(input[0].response) + close(input[0].Response) } g.Wait() @@ -126,9 +95,9 @@ func TestFusedQuerier(t *testing.T) { resp := resps[i][j] require.Equal( t, - output{ - fp: req.fp, - removals: nil, + Output{ + Fp: req.Fp, + Removals: nil, }, resp, ) @@ -136,7 +105,7 @@ func TestFusedQuerier(t *testing.T) { } } -func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]request) { +func 
setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Output) { indexBuf := bytes.NewBuffer(nil) bloomsBuf := bytes.NewBuffer(nil) writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) @@ -165,11 +134,12 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]request) { numRequestChains := 100 seriesPerRequest := 100 - var requestChains [][]request + var requestChains [][]Request + var responseChans []chan Output for i := 0; i < numRequestChains; i++ { - var reqs []request + var reqs []Request // ensure they use the same channel - ch := make(chan output) + ch := make(chan Output) // evenly spread out the series queried within a single request chain // to mimic series distribution across keyspace for j := 0; j < seriesPerRequest; j++ { @@ -178,21 +148,22 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]request) { if idx >= numSeries { idx = numSeries - 1 } - reqs = append(reqs, request{ - fp: data[idx].Series.Fingerprint, - chks: data[idx].Series.Chunks, - response: ch, + reqs = append(reqs, Request{ + Fp: data[idx].Series.Fingerprint, + Chks: data[idx].Series.Chunks, + Response: ch, }) } requestChains = append(requestChains, reqs) + responseChans = append(responseChans, ch) } - return querier, requestChains + return querier, requestChains, responseChans } func BenchmarkBlockQuerying(b *testing.B) { b.StopTimer() - querier, requestChains := setupBlockForBenchmark(b) + querier, requestChains, responseChans := setupBlockForBenchmark(b) // benchmark b.StartTimer() @@ -200,7 +171,7 @@ func BenchmarkBlockQuerying(b *testing.B) { for i := 0; i < b.N; i++ { for _, chain := range requestChains { for _, req := range chain { - _, _ = querier.CheckChunksForSeries(req.fp, req.chks, nil) + _, _ = querier.CheckChunksForSeries(req.Fp, req.Chks, nil) } } } @@ -211,22 +182,22 @@ func BenchmarkBlockQuerying(b *testing.B) { go func() { require.Nil(b, concurrency.ForEachJob( context.Background(), - len(requestChains), len(requestChains), + 
len(responseChans), len(responseChans), func(_ context.Context, idx int) error { // nolint:revive - for range requestChains[idx][0].response { + for range responseChans[idx] { } return nil }, )) }() - var itrs []PeekingIterator[request] + var itrs []PeekingIterator[Request] for i := 0; i < b.N; i++ { itrs = itrs[:0] for _, reqs := range requestChains { - itrs = append(itrs, NewPeekingIter[request](NewSliceIter[request](reqs))) + itrs = append(itrs, NewPeekingIter[Request](NewSliceIter[Request](reqs))) } fused := querier.Fuse(itrs) _ = fused.Run() diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go new file mode 100644 index 0000000000000..215ecaffe177e --- /dev/null +++ b/pkg/storage/bloom/v1/test_util.go @@ -0,0 +1,81 @@ +package v1 + +import ( + "bytes" + "fmt" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/chunkenc" + "github.com/grafana/loki/pkg/storage/bloom/v1/filter" +) + +func MakeBlockQuerier(t testing.TB, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (*BlockQuerier, []SeriesWithBloom) { + // references for linking in memory reader+writer + indexBuf := bytes.NewBuffer(nil) + bloomsBuf := bytes.NewBuffer(nil) + writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) + reader := NewByteReader(indexBuf, bloomsBuf) + numSeries := int(throughFp - fromFp) + numKeysPerSeries := 1000 + data, _ := mkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, fromFp, throughFp, fromTs, throughTs) + + builder, err := NewBlockBuilder( + BlockOptions{ + schema: Schema{ + version: DefaultSchemaVersion, + encoding: chunkenc.EncSnappy, + }, + SeriesPageSize: 100, + BloomPageSize: 10 << 10, + }, + writer, + ) + require.Nil(t, err) + itr := NewSliceIter[SeriesWithBloom](data) + _, err = builder.BuildFrom(itr) + require.Nil(t, err) + block := NewBlock(reader) + return NewBlockQuerier(block), data +} + +func mkBasicSeriesWithBlooms(nSeries, 
keysPerSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBloom, keysList [][][]byte) { + seriesList = make([]SeriesWithBloom, 0, nSeries) + keysList = make([][][]byte, 0, nSeries) + + step := (throughFp - fromFp) / model.Fingerprint(nSeries) + timeDelta := time.Duration(throughTs.Sub(fromTs).Nanoseconds() / int64(nSeries)) + + for i := 0; i < nSeries; i++ { + var series Series + series.Fingerprint = fromFp + model.Fingerprint(i)*step + from := fromTs.Add(timeDelta * time.Duration(i)) + series.Chunks = []ChunkRef{ + { + Start: from, + End: from.Add(timeDelta), + Checksum: uint32(i), + }, + } + + var bloom Bloom + bloom.ScalableBloomFilter = *filter.NewScalableBloomFilter(1024, 0.01, 0.8) + + keys := make([][]byte, 0, keysPerSeries) + for j := 0; j < keysPerSeries; j++ { + key := []byte(fmt.Sprint(i*keysPerSeries + j)) + bloom.Add(key) + keys = append(keys, key) + } + + seriesList = append(seriesList, SeriesWithBloom{ + Series: &series, + Bloom: &bloom, + }) + keysList = append(keysList, keys) + } + return +} diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go index 5709bf8866f21..d1e9f24ef866b 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client.go +++ b/pkg/storage/stores/shipper/bloomshipper/client.go @@ -11,11 +11,11 @@ import ( "strings" "time" - "github.com/prometheus/common/model" - "github.com/grafana/dskit/concurrency" + "github.com/prometheus/common/model" "github.com/grafana/loki/pkg/storage" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/util/math" @@ -37,6 +37,16 @@ type Ref struct { Checksum uint32 } +// Cmp returns the fingerprint's position relative to the bounds +func (b Ref) Cmp(fp uint64) v1.BoundsCheck { + if fp < b.MinFingerprint { + return v1.Before + } else if fp > b.MaxFingerprint { + return 
v1.After + } + return v1.Overlap +} + type BlockRef struct { Ref IndexPath string diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go index 98dbbb20a476a..c04cad433308a 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go @@ -39,30 +39,30 @@ func NewShipper(client Client, config config.Config, limits Limits, logger log.L }, nil } -func (s *Shipper) ForEachBlock( - ctx context.Context, - tenantID string, - from, through time.Time, - fingerprints []uint64, - callback ForEachBlockCallback) error { +func (s *Shipper) GetBlockRefs(ctx context.Context, tenantID string, from, through time.Time) ([]BlockRef, error) { + level.Debug(s.logger).Log("msg", "GetBlockRefs", "tenant", tenantID, "from", from, "through", through) - level.Debug(s.logger).Log("msg", "ForEachBlock", "tenant", tenantID, "from", from, "through", through, "fingerprints", len(fingerprints)) - - blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from.UnixNano(), through.UnixNano(), fingerprints) + blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from.UnixNano(), through.UnixNano(), nil) if err != nil { - return fmt.Errorf("error fetching active block references : %w", err) + return nil, fmt.Errorf("error fetching active block references : %w", err) } + return blockRefs, nil +} +func (s *Shipper) Fetch(ctx context.Context, tenantID string, blocks []BlockRef, callback ForEachBlockCallback) error { cancelContext, cancelFunc := context.WithCancel(ctx) defer cancelFunc() - blocksChannel, errorsChannel := s.blockDownloader.downloadBlocks(cancelContext, tenantID, blockRefs) + blocksChannel, errorsChannel := s.blockDownloader.downloadBlocks(cancelContext, tenantID, blocks) + for { select { + case <-ctx.Done(): + return fmt.Errorf("failed to fetch blocks: %w", ctx.Err()) case result, ok := <-blocksChannel: if !ok { return nil } - err = callback(result.BlockQuerier) + err := 
callback(result.BlockQuerier, result.MinFingerprint, result.MaxFingerprint) if err != nil { return fmt.Errorf("error running callback function for block %s err: %w", result.BlockPath, err) } @@ -74,27 +74,34 @@ func (s *Shipper) ForEachBlock( } } +func (s *Shipper) ForEachBlock(ctx context.Context, tenantID string, from, through time.Time, fingerprints []uint64, callback ForEachBlockCallback) error { + level.Debug(s.logger).Log("msg", "ForEachBlock", "tenant", tenantID, "from", from, "through", through, "fingerprints", len(fingerprints)) + + blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from.UnixNano(), through.UnixNano(), fingerprints) + if err != nil { + return fmt.Errorf("error fetching active block references : %w", err) + } + + return s.Fetch(ctx, tenantID, blockRefs, callback) +} + func (s *Shipper) Stop() { s.client.Stop() s.blockDownloader.stop() } -// getFromThrough returns the first and list item of a fingerprint slice +// getFirstLast returns the first and last item of a fingerprint slice // It assumes an ascending sorted list of fingerprints. 
-func getFromThrough(fingerprints []uint64) (uint64, uint64) { - if len(fingerprints) == 0 { - return 0, 0 +func getFirstLast[T any](s []T) (T, T) { + var zero T + if len(s) == 0 { + return zero, zero } - return fingerprints[0], fingerprints[len(fingerprints)-1] + return s[0], s[len(s)-1] } -func (s *Shipper) getActiveBlockRefs( - ctx context.Context, - tenantID string, - from, through int64, - fingerprints []uint64) ([]BlockRef, error) { - - minFingerprint, maxFingerprint := getFromThrough(fingerprints) +func (s *Shipper) getActiveBlockRefs(ctx context.Context, tenantID string, from, through int64, fingerprints []uint64) ([]BlockRef, error) { + minFingerprint, maxFingerprint := getFirstLast(fingerprints) metas, err := s.client.GetMetas(ctx, MetaSearchParams{ TenantID: tenantID, MinFingerprint: minFingerprint, @@ -164,7 +171,7 @@ func isOutsideRange(b *BlockRef, startTimestamp, endTimestamp int64, fingerprint } // Then, check if outside of min/max of fingerprint slice - minFp, maxFp := getFromThrough(fingerprints) + minFp, maxFp := getFirstLast(fingerprints) if b.MaxFingerprint < minFp || b.MinFingerprint > maxFp { return true } diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go index 80f2c352d5326..70c61ba0add8e 100644 --- a/pkg/storage/stores/shipper/bloomshipper/store.go +++ b/pkg/storage/stores/shipper/bloomshipper/store.go @@ -2,18 +2,20 @@ package bloomshipper import ( "context" + "sort" "time" "github.com/prometheus/common/model" - "github.com/grafana/loki/pkg/logproto" v1 "github.com/grafana/loki/pkg/storage/bloom/v1" ) -type ForEachBlockCallback func(bq *v1.BlockQuerier) error +type ForEachBlockCallback func(bq *v1.BlockQuerier, minFp, maxFp uint64) error type ReadShipper interface { + GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) ForEachBlock(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64, callback 
ForEachBlockCallback) error + Fetch(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error } type Interface interface { @@ -21,8 +23,15 @@ type Interface interface { Stop() } +type BlockQuerierWithFingerprintRange struct { + *v1.BlockQuerier + MinFp, MaxFp model.Fingerprint +} + type Store interface { - FilterChunkRefs(ctx context.Context, tenant string, from, through time.Time, chunkRefs []*logproto.GroupedChunkRefs, filters ...*logproto.LineFilterExpression) ([]*logproto.GroupedChunkRefs, error) + GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) + GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error) + GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) Stop() } @@ -40,84 +49,41 @@ func (bs *BloomStore) Stop() { bs.shipper.Stop() } -func (bs *BloomStore) FilterChunkRefs(ctx context.Context, tenant string, from, through time.Time, chunkRefs []*logproto.GroupedChunkRefs, filters ...*logproto.LineFilterExpression) ([]*logproto.GroupedChunkRefs, error) { - fingerprints := make([]uint64, 0, len(chunkRefs)) - for _, ref := range chunkRefs { - fingerprints = append(fingerprints, ref.Fingerprint) - } - - blooms, err := bs.queriers(ctx, tenant, from, through, fingerprints) - if err != nil { - return nil, err - } - - searches := convertLineFilterExpressions(filters) - - for _, ref := range chunkRefs { - refs, err := blooms.Filter(ctx, model.Fingerprint(ref.Fingerprint), convertToChunkRefs(ref.Refs), searches) - if err != nil { - return nil, err - } - ref.Refs = convertToShortRefs(refs) - } - return chunkRefs, nil +// GetBlockRefs implements Store +func (bs *BloomStore) GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) { + return bs.shipper.GetBlockRefs(ctx, tenant, from, through) } 
-func (bs *BloomStore) queriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) (*bloomQueriers, error) { - bf := newBloomFilters(1024) - err := bs.shipper.ForEachBlock(ctx, tenant, from, through, fingerprints, func(bq *v1.BlockQuerier) error { - bf.queriers = append(bf.queriers, bq) +// GetQueriersForBlocks implements Store +func (bs *BloomStore) GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) { + bqs := make([]BlockQuerierWithFingerprintRange, 0, 32) + err := bs.shipper.Fetch(ctx, tenant, blocks, func(bq *v1.BlockQuerier, minFp uint64, maxFp uint64) error { + bqs = append(bqs, BlockQuerierWithFingerprintRange{ + BlockQuerier: bq, + MinFp: model.Fingerprint(minFp), + MaxFp: model.Fingerprint(maxFp), + }) return nil }) - return bf, err -} - -func convertLineFilterExpressions(filters []*logproto.LineFilterExpression) [][]byte { - searches := make([][]byte, len(filters)) - for _, f := range filters { - searches = append(searches, []byte(f.Match)) - } - return searches -} - -// convertToShortRefs converts a v1.ChunkRefs into []*logproto.ShortRef -// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request. -func convertToShortRefs(refs v1.ChunkRefs) []*logproto.ShortRef { - result := make([]*logproto.ShortRef, len(refs)) - for _, ref := range refs { - result = append(result, &logproto.ShortRef{From: ref.Start, Through: ref.End, Checksum: ref.Checksum}) - } - return result -} - -// convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs -// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request. 
-func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs { - result := make(v1.ChunkRefs, len(refs)) - for _, ref := range refs { - result = append(result, v1.ChunkRef{Start: ref.From, End: ref.Through, Checksum: ref.Checksum}) - } - return result -} - -type bloomQueriers struct { - queriers []*v1.BlockQuerier -} - -func newBloomFilters(size int) *bloomQueriers { - return &bloomQueriers{ - queriers: make([]*v1.BlockQuerier, size), - } + sort.Slice(bqs, func(i, j int) bool { + return bqs[i].MinFp < bqs[j].MinFp + }) + return bqs, err } -func (bf *bloomQueriers) Filter(_ context.Context, fp model.Fingerprint, chunkRefs v1.ChunkRefs, filters [][]byte) (v1.ChunkRefs, error) { - result := make(v1.ChunkRefs, len(chunkRefs)) - for _, bq := range bf.queriers { - refs, err := bq.CheckChunksForSeries(fp, chunkRefs, filters) - if err != nil { - return nil, err - } - result = append(result, refs...) - } - return result, nil +// BlockQueriers implements Store +func (bs *BloomStore) GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error) { + bqs := make([]BlockQuerierWithFingerprintRange, 0, 32) + err := bs.shipper.ForEachBlock(ctx, tenant, from, through, fingerprints, func(bq *v1.BlockQuerier, minFp uint64, maxFp uint64) error { + bqs = append(bqs, BlockQuerierWithFingerprintRange{ + BlockQuerier: bq, + MinFp: model.Fingerprint(minFp), + MaxFp: model.Fingerprint(maxFp), + }) + return nil + }) + sort.Slice(bqs, func(i, j int) bool { + return bqs[i].MinFp < bqs[j].MinFp + }) + return bqs, err }