From 97212eadf15c2b5ee2cd59b7c1df71f6177cfe7e Mon Sep 17 00:00:00 2001 From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com> Date: Thu, 23 May 2024 12:10:48 -0400 Subject: [PATCH 01/12] feat: Added Interactive Sandbox to Quickstart tutorial (#12701) --- docs/sources/get-started/quick-start.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md index b08f07a8e7973..f459e564092e1 100644 --- a/docs/sources/get-started/quick-start.md +++ b/docs/sources/get-started/quick-start.md @@ -22,6 +22,18 @@ The Docker Compose configuration instantiates the following components, each in {{< figure max-width="75%" src="/media/docs/loki/get-started-flog-v3.png" caption="Getting started sample application" alt="Getting started sample application">}} +## Interactive Learning Environment + +{{< admonition type="note" >}} +The Interactive Learning Environment is currently in trial. Please provide feedback, report bugs, and raise issues in the [Grafana Killercoda Repository](https://github.com/grafana/killercoda). +{{< /admonition >}} + +Try out this demo within our interactive learning environment: [Loki Quickstart Sandbox](https://killercoda.com/grafana-labs/course/loki/loki-quickstart) + +- A free Killercoda account is required to verify you are not a bot. +- Tutorial instructions are located on the left-hand side of the screen. Click to move on to the next section. +- All commands run inside the interactive terminal. Grafana can also be accessed via the URL links provided within the sandbox. + ## Installing Loki and collecting sample logs Prerequisites From efdae3df14c47d627eb99e91466e0451db6e16f6 Mon Sep 17 00:00:00 2001 From: hayden Date: Thu, 23 May 2024 16:25:50 -0400 Subject: [PATCH 02/12] feat(helm): Support for PVC Annotations for Non-Distributed Modes (#12023) Signed-off-by: hfuss Co-authored-by: J Stickler Co-authored-by: Trevor Whitney --- docs/Makefile | 1 + docs/sources/setup/install/helm/reference.md | 39 +++++++++++++++++++ production/helm/loki/CHANGELOG.md | 4 ++ production/helm/loki/Chart.yaml | 2 +- production/helm/loki/README.md | 2 +- .../backend/statefulset-backend.yaml | 4 ++ .../loki/templates/read/statefulset-read.yaml | 4 ++ .../templates/single-binary/statefulset.yaml | 4 ++ .../templates/write/statefulset-write.yaml | 4 ++ production/helm/loki/values.yaml | 9 +++++ 10 files changed, 71 insertions(+), 2 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index 63fc849789c11..4bed302d71794 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -10,6 +10,7 @@ include docs.mk PODMAN := $(shell if command -v podman >/dev/null 2>&1; then echo podman; else echo docker; fi) BUILD_IN_CONTAINER ?= true +.PHONY: sources/setup/install/helm/reference.md sources/setup/install/helm/reference.md: ../production/helm/loki/reference.md.gotmpl ../production/helm/loki/values.yaml ifeq ($(BUILD_IN_CONTAINER),true) $(PODMAN) run --rm --volume "$(realpath ..):/helm-docs" -u "$$(id -u)" "docker.io/jnorwood/helm-docs:v1.11.0" \ diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 53101a4832143..76b4936f20bfd 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -315,6 +315,7 @@ This is the generated reference for the Loki Helm Chart values. "initContainers": [], "nodeSelector": {}, "persistence": { + "annotations": {}, "dataVolumeParameters": { "emptyDir": {} }, @@ -512,6 +513,15 @@ null
 {}
 </pre>
 </td>
 		</tr>
+		<tr>
+			<td>backend.persistence.annotations</td>
+			<td>object</td>
+			<td>Annotations for volume claim</td>
+			<td><pre lang="json">
+{}
+</pre>
+</td>
+		</tr>
@@ -6226,6 +6236,7 @@ false "drivesPerNode": 2, "enabled": false, "persistence": { + "annotations": {}, "size": "5Gi" }, "replicas": 1, @@ -8442,6 +8453,7 @@ false "lifecycle": {}, "nodeSelector": {}, "persistence": { + "annotations": {}, "enableStatefulSetAutoDeletePVC": true, "selector": null, "size": "10Gi", @@ -8653,6 +8665,15 @@ false
 {}
 </pre>
 </td>
 		</tr>
+		<tr>
+			<td>read.persistence.annotations</td>
+			<td>object</td>
+			<td>Annotations for volume claim</td>
+			<td><pre lang="json">
+{}
+</pre>
+</td>
+		</tr>
@@ -9893,6 +9914,15 @@ null
 {}
 </pre>
 </td>
 		</tr>
+		<tr>
+			<td>singleBinary.persistence.annotations</td>
+			<td>object</td>
+			<td>Annotations for volume claim</td>
+			<td><pre lang="json">
+{}
+</pre>
+</td>
+		</tr>
@@ -10677,6 +10707,15 @@ null
 {}
 </pre>
 </td>
 		</tr>
+		<tr>
+			<td>write.persistence.annotations</td>
+			<td>object</td>
+			<td>Annotations for volume claim</td>
+			<td><pre lang="json">
+{}
+</pre>
+</td>
+		</tr>
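For illustration, a minimal `values.yaml` sketch exercising the new keys from this change. The annotation key/value pair below is a made-up example; the same `persistence.annotations` map is available under `write`, `read`, `backend`, `singleBinary`, and `minio.persistence`.

```yaml
# Hypothetical override: attach an annotation to the PVCs created from the
# write and backend StatefulSet volumeClaimTemplates.
write:
  persistence:
    annotations:
      example.com/backup: "enabled"  # illustrative annotation only
backend:
  persistence:
    annotations:
      example.com/backup: "enabled"
```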
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 1606c89914f88..77b801e603631 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.6.0 + +- [ENHANCEMENT] Allow setting PVC annotations for all volume claim templates in simple scalable and single binary mode + ## 6.5.2 - [BUGFIX] Fixed Ingress routing for all deployment modes. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 989a54d146a1d..637e66d70887e 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. type: application appVersion: 3.0.0 -version: 6.5.2 +version: 6.6.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 55a7256c72f7f..5fa6bd548bad7 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.5.2](https://img.shields.io/badge/Version-6.5.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) +![Version: 6.6.0](https://img.shields.io/badge/Version-6.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml index f96f0a4d21217..534190d4a4533 100644 --- a/production/helm/loki/templates/backend/statefulset-backend.yaml +++ b/production/helm/loki/templates/backend/statefulset-backend.yaml @@ -266,6 +266,10 @@ spec: kind: PersistentVolumeClaim metadata: name: data + {{- with .Values.backend.persistence.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} spec: accessModes: - ReadWriteOnce diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml index 0a31de4996dfb..7696d90e65bd6 100644 --- a/production/helm/loki/templates/read/statefulset-read.yaml +++ b/production/helm/loki/templates/read/statefulset-read.yaml @@ -180,6 +180,10 @@ spec: kind: PersistentVolumeClaim metadata: name: data + {{- with .Values.read.persistence.annotations }} + annotations: + {{- toYaml . 
| nindent 10 }} + {{- end }} spec: accessModes: - ReadWriteOnce diff --git a/production/helm/loki/templates/single-binary/statefulset.yaml b/production/helm/loki/templates/single-binary/statefulset.yaml index 51c0062fc94ff..7bd2b9813f609 100644 --- a/production/helm/loki/templates/single-binary/statefulset.yaml +++ b/production/helm/loki/templates/single-binary/statefulset.yaml @@ -175,6 +175,10 @@ spec: kind: PersistentVolumeClaim metadata: name: storage + {{- with .Values.singleBinary.persistence.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} spec: accessModes: - ReadWriteOnce diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml index 54c936958b559..75605c27c26cb 100644 --- a/production/helm/loki/templates/write/statefulset-write.yaml +++ b/production/helm/loki/templates/write/statefulset-write.yaml @@ -193,6 +193,10 @@ spec: kind: PersistentVolumeClaim metadata: name: data + {{- with .Values.write.persistence.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} spec: accessModes: - ReadWriteOnce diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 3edfc24ba34fb..4c70bf16fe474 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -1294,6 +1294,8 @@ singleBinary: storageClass: null # -- Selector for persistent disk selector: null + # -- Annotations for volume claim + annotations: {} ###################################################################################################################### # # Simple Scalable Deployment (SSD) Mode @@ -1421,6 +1423,8 @@ write: storageClass: null # -- Selector for persistent disk selector: null + # -- Annotations for volume claim + annotations: {} # -- Configuration for the read pod(s) read: # -- Number of replicas for the read @@ -1528,6 +1532,8 @@ read: storageClass: null # -- Selector for persistent disk selector: null + # -- Annotations for volume claim + annotations: {} # -- Configuration for the backend pod(s) backend: # -- Number of replicas for the backend @@ -1636,6 +1642,8 @@ backend: storageClass: null # -- Selector for persistent disk selector: null + # -- Annotations for volume claim + annotations: {} ###################################################################################################################### # # Microservices Mode @@ -3091,6 +3099,7 @@ minio: purge: false persistence: size: 5Gi + annotations: {} resources: requests: cpu: 100m From ca030a5c4335b0258e83aebd8779ea4d348003f3 Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Fri, 24 May 2024 01:55:59 -0600 Subject: [PATCH 03/12] fix: change log level since this is a known case (#13029) --- pkg/querier/queryrange/limits.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go index f90a9aa3c4e5e..68f71680dd676 100644 --- a/pkg/querier/queryrange/limits.go +++ b/pkg/querier/queryrange/limits.go @@ -345,7 +345,7 @@ func (q *querySizeLimiter) Do(ctx context.Context, r queryrangebase.Request) (qu // Only support TSDB schemaCfg, err := q.getSchemaCfg(r) if err != nil { - level.Error(log).Log("msg", "failed to get schema config, not applying querySizeLimit", "err", err) + level.Warn(log).Log("msg", "failed to get schema config, not applying querySizeLimit", "err", err) return q.next.Do(ctx, r) } if schemaCfg.IndexType != types.TSDBType { From 8101e21f9973b8261de0ee3eb34fa4d7b88ddaac Mon Sep 17 
00:00:00 2001 From: Lex Rivera Date: Fri, 24 May 2024 16:08:56 +0300 Subject: [PATCH 04/12] fix(helm): fix queryScheduler servicemonitor (#12753) Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- production/helm/loki/CHANGELOG.md | 4 ++++ production/helm/loki/Chart.yaml | 2 +- production/helm/loki/README.md | 2 +- .../templates/query-scheduler/service-query-scheduler.yaml | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 77b801e603631..a86a7d0281825 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.6.1 + +- [BUGFIX] Fix query scheduler http-metrics targetPort + ## 6.6.0 - [ENHANCEMENT] Allow setting PVC annotations for all volume claim templates in simple scalable and single binary mode diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 637e66d70887e..d2ec2d3d9e59b 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. type: application appVersion: 3.0.0 -version: 6.6.0 +version: 6.6.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 5fa6bd548bad7..03ddb05bf6608 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.6.0](https://img.shields.io/badge/Version-6.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) +![Version: 6.6.1](https://img.shields.io/badge/Version-6.6.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. 
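For context on the fix below: the query-scheduler Service published its `http-metrics` port with `targetPort: http`, presumably a container port name the query-scheduler pods do not define, so scrapes routed through the ServiceMonitor could not reach the pods. A sketch of the corrected ports stanza, mirroring the diff that follows:

```yaml
# Corrected query-scheduler Service ports (see the diff below).
ports:
  - name: http-metrics
    port: 3100
    targetPort: http-metrics  # previously `http`; must match the pod's container port name
    protocol: TCP
  - name: grpclb
    port: 9095
```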
diff --git a/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml b/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml index 89883155a27e1..2b3f1b2300609 100644 --- a/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml +++ b/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml @@ -21,7 +21,7 @@ spec: ports: - name: http-metrics port: 3100 - targetPort: http + targetPort: http-metrics protocol: TCP - name: grpclb port: 9095 From 4901a5c452fa6822a645f56e20e704db9366182a Mon Sep 17 00:00:00 2001 From: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com> Date: Fri, 24 May 2024 23:53:29 +0300 Subject: [PATCH 05/12] fix: not owned stream count (#13030) Signed-off-by: Vladyslav Diachenko --- pkg/ingester/limiter_test.go | 4 ++-- pkg/ingester/owned_streams.go | 33 ++++++++++++++++++++++-------- pkg/ingester/owned_streams_test.go | 32 ++++++++++++++++++++++++++++- 3 files changed, 57 insertions(+), 12 deletions(-) diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go index 9d4d3b3037c6f..b00bede10417d 100644 --- a/pkg/ingester/limiter_test.go +++ b/pkg/ingester/limiter_test.go @@ -24,7 +24,7 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { expected error useOwnedStreamService bool fixedLimit int32 - ownedStreamCount int64 + ownedStreamCount int }{ "both local and global limit are disabled": { maxLocalStreamsPerUser: 0, @@ -147,7 +147,7 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { ownedStreamSvc := &ownedStreamService{ fixedLimit: atomic.NewInt32(testData.fixedLimit), - ownedStreamCount: atomic.NewInt64(testData.ownedStreamCount), + ownedStreamCount: testData.ownedStreamCount, } limiter := NewLimiter(limits, NilMetrics, ring, testData.ringReplicationFactor) defaultCountSupplier := func() int { diff --git a/pkg/ingester/owned_streams.go b/pkg/ingester/owned_streams.go index 01cb8235f9b1a..3be6fb40fdd86 100644 --- a/pkg/ingester/owned_streams.go +++ b/pkg/ingester/owned_streams.go @@ -1,6 +1,10 @@ package ingester -import "go.uber.org/atomic" +import ( + "sync" + + "go.uber.org/atomic" +) type ownedStreamService struct { tenantID string @@ -8,22 +12,25 @@ type ownedStreamService struct { fixedLimit *atomic.Int32 //todo: implement job to recalculate it - ownedStreamCount *atomic.Int64 + ownedStreamCount int + notOwnedStreamCount int + lock sync.RWMutex } func newOwnedStreamService(tenantID string, limiter *Limiter) *ownedStreamService { svc := &ownedStreamService{ - tenantID: tenantID, - limiter: limiter, - ownedStreamCount: atomic.NewInt64(0), - fixedLimit: atomic.NewInt32(0), + tenantID: tenantID, + limiter: limiter, + fixedLimit: atomic.NewInt32(0), } svc.updateFixedLimit() return svc } func (s *ownedStreamService) getOwnedStreamCount() int { - return int(s.ownedStreamCount.Load()) + s.lock.RLock() + defer s.lock.RUnlock() + return s.ownedStreamCount } func (s *ownedStreamService) updateFixedLimit() { @@ -36,9 +43,17 @@ func (s *ownedStreamService) getFixedLimit() int { } func (s *ownedStreamService) incOwnedStreamCount() { - s.ownedStreamCount.Inc() + s.lock.Lock() + defer s.lock.Unlock() + s.ownedStreamCount++ } func (s *ownedStreamService) decOwnedStreamCount() { - s.ownedStreamCount.Dec() + s.lock.Lock() + defer s.lock.Unlock() + if s.notOwnedStreamCount > 0 { + s.notOwnedStreamCount-- + return + } + s.ownedStreamCount-- } diff --git a/pkg/ingester/owned_streams_test.go b/pkg/ingester/owned_streams_test.go index 
c7ddd9d87f29d..759927a1d0cfe 100644 --- a/pkg/ingester/owned_streams_test.go +++ b/pkg/ingester/owned_streams_test.go @@ -1,6 +1,7 @@ package ingester import ( + "sync" "testing" "github.com/stretchr/testify/require" @@ -29,8 +30,37 @@ func Test_OwnedStreamService(t *testing.T) { service.incOwnedStreamCount() service.incOwnedStreamCount() - require.Equal(t, 2, service.getOwnedStreamCount()) + service.incOwnedStreamCount() + require.Equal(t, 3, service.getOwnedStreamCount()) + + // simulate the effect from the recalculation job + service.notOwnedStreamCount = 1 + service.ownedStreamCount = 2 + + service.decOwnedStreamCount() + require.Equal(t, 2, service.getOwnedStreamCount(), "owned stream count must be decremented only when notOwnedStreamCount is set to 0") + require.Equal(t, 0, service.notOwnedStreamCount) service.decOwnedStreamCount() require.Equal(t, 1, service.getOwnedStreamCount()) + require.Equal(t, 0, service.notOwnedStreamCount, "notOwnedStreamCount must not be decremented lower than 0") + + group := sync.WaitGroup{} + group.Add(200) + for i := 0; i < 100; i++ { + go func() { + defer group.Done() + service.incOwnedStreamCount() + }() + } + + for i := 0; i < 100; i++ { + go func() { + defer group.Done() + service.decOwnedStreamCount() + }() + } + group.Wait() + + require.Equal(t, 1, service.getOwnedStreamCount(), "owned stream count must not be changed") } From f6529c293e3281ba1c19ff36d6018c0ef338f3e6 Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Mon, 27 May 2024 12:19:13 +0200 Subject: [PATCH 06/12] refactor(blooms): Add RPC service for bloom-planner (#13015) --- pkg/bloombuild/planner/metrics.go | 7 + pkg/bloombuild/planner/planner.go | 194 +++- pkg/bloombuild/planner/planner_test.go | 214 +++- pkg/bloombuild/planner/task.go | 25 +- pkg/bloombuild/protos/compat.go | 113 +++ pkg/bloombuild/protos/service.pb.go | 1175 ++++++++++++++++++++++ pkg/bloombuild/protos/service.proto | 32 + pkg/bloombuild/protos/types.pb.go | 1255 ++++++++++++++++++++++++ pkg/bloombuild/protos/types.proto | 45 + 9 files changed, 2992 insertions(+), 68 deletions(-) create mode 100644 pkg/bloombuild/protos/compat.go create mode 100644 pkg/bloombuild/protos/service.pb.go create mode 100644 pkg/bloombuild/protos/service.proto create mode 100644 pkg/bloombuild/protos/types.pb.go create mode 100644 pkg/bloombuild/protos/types.proto diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go index 347af1926617b..9eaf453b7853c 100644 --- a/pkg/bloombuild/planner/metrics.go +++ b/pkg/bloombuild/planner/metrics.go @@ -24,6 +24,7 @@ type Metrics struct { connectedBuilders prometheus.GaugeFunc queueDuration prometheus.Histogram inflightRequests prometheus.Summary + taskLost prometheus.Counter buildStarted prometheus.Counter buildCompleted *prometheus.CounterVec @@ -65,6 +66,12 @@ func NewMetrics( MaxAge: time.Minute, AgeBuckets: 6, }), + taskLost: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "tasks_lost_total", + Help: "Total number of tasks lost due to not being picked up by a builder and failed to be requeued.", + }), buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 9a5b9f6dc238e..dfb6fea80cc3f 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -4,14 +4,17 @@ import ( "context" "fmt" "sort" + "sync" "time" "github.com/go-kit/log" 
"github.com/go-kit/log/level" "github.com/grafana/dskit/services" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" "github.com/grafana/loki/v3/pkg/queue" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -22,6 +25,8 @@ import ( utillog "github.com/grafana/loki/v3/pkg/util/log" ) +var errPlannerIsNotRunning = errors.New("planner is not running") + type Planner struct { services.Service // Subservices manager. @@ -38,6 +43,8 @@ type Planner struct { tasksQueue *queue.RequestQueue activeUsers *util.ActiveUsersCleanupService + pendingTasks sync.Map + metrics *Metrics logger log.Logger } @@ -92,13 +99,23 @@ func New( return p, nil } -func (p *Planner) starting(_ context.Context) (err error) { +func (p *Planner) starting(ctx context.Context) (err error) { + if err := services.StartManagerAndAwaitHealthy(ctx, p.subservices); err != nil { + return fmt.Errorf("error starting planner subservices: %w", err) + } + p.metrics.running.Set(1) - return err + return nil } func (p *Planner) stopping(_ error) error { - p.metrics.running.Set(0) + defer p.metrics.running.Set(0) + + // This will also stop the requests queue, which stop accepting new requests and errors out any pending requests. + if err := services.StopManagerAndAwaitStopped(context.Background(), p.subservices); err != nil { + return fmt.Errorf("error stopping planner subservices: %w", err) + } + return nil } @@ -108,20 +125,32 @@ func (p *Planner) running(ctx context.Context) error { level.Error(p.logger).Log("msg", "bloom build iteration failed for the first time", "err", err) } - ticker := time.NewTicker(p.cfg.PlanningInterval) - defer ticker.Stop() + planningTicker := time.NewTicker(p.cfg.PlanningInterval) + defer planningTicker.Stop() + + inflightTasksTicker := time.NewTicker(250 * time.Millisecond) + defer inflightTasksTicker.Stop() + for { select { case <-ctx.Done(): - err := ctx.Err() - level.Debug(p.logger).Log("msg", "planner context done", "err", err) - return err + if err := ctx.Err(); !errors.Is(err, context.Canceled) { + level.Error(p.logger).Log("msg", "planner context done with error", "err", err) + return err + } - case <-ticker.C: + level.Debug(p.logger).Log("msg", "planner context done") + return nil + + case <-planningTicker.C: level.Info(p.logger).Log("msg", "starting bloom build iteration") if err := p.runOne(ctx); err != nil { level.Error(p.logger).Log("msg", "bloom build iteration failed", "err", err) } + + case <-inflightTasksTicker.C: + inflight := p.totalPendingTasks() + p.metrics.inflightRequests.Observe(float64(inflight)) } } } @@ -159,19 +188,13 @@ func (p *Planner) runOne(ctx context.Context) error { now := time.Now() for _, gap := range gaps { totalTasks++ - task := Task{ - table: w.table.Addr(), - tenant: w.tenant, - OwnershipBounds: w.ownershipRange, - tsdb: gap.tsdb, - gaps: gap.gaps, - - queueTime: now, - ctx: ctx, - } - p.activeUsers.UpdateUserTimestamp(task.tenant, now) - if err := p.tasksQueue.Enqueue(task.tenant, nil, task, nil); err != nil { + task := NewTask( + ctx, now, + protos.NewTask(w.table.Addr(), w.tenant, w.ownershipRange, gap.tsdb, gap.gaps), + ) + + if err := p.enqueueTask(task); err != nil { level.Error(logger).Log("msg", "error enqueuing task", "err", err) continue } @@ -326,7 +349,7 @@ func (p *Planner) findGapsForBounds( // This is a performance optimization to avoid expensive re-reindexing type blockPlan struct { tsdb 
tsdb.SingleTenantTSDBIdentifier - gaps []GapWithBlocks + gaps []protos.GapWithBlocks } func (p *Planner) findOutdatedGaps( @@ -420,12 +443,12 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan for _, idx := range tsdbs { plan := blockPlan{ tsdb: idx.tsdb, - gaps: make([]GapWithBlocks, 0, len(idx.gaps)), + gaps: make([]protos.GapWithBlocks, 0, len(idx.gaps)), } for _, gap := range idx.gaps { - planGap := GapWithBlocks{ - bounds: gap, + planGap := protos.GapWithBlocks{ + Bounds: gap, } for _, meta := range metas { @@ -442,18 +465,18 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan } // this block overlaps the gap, add it to the plan // for this gap - planGap.blocks = append(planGap.blocks, block) + planGap.Blocks = append(planGap.Blocks, block) } } // ensure we sort blocks so deduping iterator works as expected - sort.Slice(planGap.blocks, func(i, j int) bool { - return planGap.blocks[i].Bounds.Less(planGap.blocks[j].Bounds) + sort.Slice(planGap.Blocks, func(i, j int) bool { + return planGap.Blocks[i].Bounds.Less(planGap.Blocks[j].Bounds) }) peekingBlocks := v1.NewPeekingIter[bloomshipper.BlockRef]( v1.NewSliceIter[bloomshipper.BlockRef]( - planGap.blocks, + planGap.Blocks, ), ) // dedupe blocks which could be in multiple metas @@ -472,7 +495,7 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan if err != nil { return nil, fmt.Errorf("failed to dedupe blocks: %w", err) } - planGap.blocks = deduped + planGap.Blocks = deduped plan.gaps = append(plan.gaps, planGap) } @@ -482,3 +505,114 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan return plans, nil } + +func (p *Planner) addPendingTask(task *Task) { + p.pendingTasks.Store(task.ID, task) +} + +func (p *Planner) removePendingTask(task *Task) { + p.pendingTasks.Delete(task.ID) +} + +func (p *Planner) totalPendingTasks() (total int) { + p.pendingTasks.Range(func(_, _ interface{}) bool { + total++ + return true + }) + return total +} + +func (p *Planner) enqueueTask(task *Task) error { + p.activeUsers.UpdateUserTimestamp(task.Tenant, time.Now()) + return p.tasksQueue.Enqueue(task.Tenant, nil, task, func() { + p.addPendingTask(task) + }) +} + +func (p *Planner) NotifyBuilderShutdown( + _ context.Context, + req *protos.NotifyBuilderShutdownRequest, +) (*protos.NotifyBuilderShutdownResponse, error) { + level.Debug(p.logger).Log("msg", "builder shutdown", "builder", req.BuilderID) + p.tasksQueue.UnregisterConsumerConnection(req.GetBuilderID()) + + return &protos.NotifyBuilderShutdownResponse{}, nil +} + +func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer) error { + resp, err := builder.Recv() + if err != nil { + return fmt.Errorf("error receiving message from builder: %w", err) + } + + builderID := resp.GetBuilderID() + logger := log.With(p.logger, "builder", builderID) + level.Debug(logger).Log("msg", "builder connected") + + p.tasksQueue.RegisterConsumerConnection(builderID) + defer p.tasksQueue.UnregisterConsumerConnection(builderID) + + lastIndex := queue.StartIndex + for p.isRunningOrStopping() { + item, idx, err := p.tasksQueue.Dequeue(builder.Context(), lastIndex, builderID) + if err != nil { + return fmt.Errorf("error dequeuing task: %w", err) + } + lastIndex = idx + + if item == nil { + + return fmt.Errorf("dequeue() call resulted in nil response. 
builder: %s", builderID) + } + task := item.(*Task) + + queueTime := time.Since(task.queueTime) + p.metrics.queueDuration.Observe(queueTime.Seconds()) + + if task.ctx.Err() != nil { + level.Warn(logger).Log("msg", "task context done after dequeue", "err", task.ctx.Err()) + lastIndex = lastIndex.ReuseLastIndex() + p.removePendingTask(task) + continue + } + + if err := p.forwardTaskToBuilder(builder, builderID, task); err != nil { + // Re-queue the task if the builder is failing to process the tasks + if err := p.enqueueTask(task); err != nil { + p.metrics.taskLost.Inc() + level.Error(logger).Log("msg", "error re-enqueuing task. this task will be lost", "err", err) + } + + return fmt.Errorf("error forwarding task to builder (%s). Task requeued: %w", builderID, err) + } + + } + + return errPlannerIsNotRunning +} + +func (p *Planner) forwardTaskToBuilder( + builder protos.PlannerForBuilder_BuilderLoopServer, + builderID string, + task *Task, +) error { + defer p.removePendingTask(task) + + msg := &protos.PlannerToBuilder{ + Task: task.ToProtoTask(), + } + + if err := builder.Send(msg); err != nil { + return fmt.Errorf("error sending task to builder (%s): %w", builderID, err) + } + + // TODO(salvacorts): Implement timeout and retry for builder response. + _, err := builder.Recv() + + return err +} + +func (p *Planner) isRunningOrStopping() bool { + st := p.State() + return st == services.Running || st == services.Stopping +} diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go index 346bd145ab8dc..8eccc77e19bf1 100644 --- a/pkg/bloombuild/planner/planner_test.go +++ b/pkg/bloombuild/planner/planner_test.go @@ -1,15 +1,28 @@ package planner import ( + "context" + "fmt" "testing" "time" + "github.com/go-kit/log" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" + "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/chunk/client/local" + "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" + bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" + "github.com/grafana/loki/v3/pkg/storage/types" ) func tsdbID(n int) tsdb.SingleTenantTSDBIdentifier { @@ -155,9 +168,9 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: tsdbID(0), - gaps: []GapWithBlocks{ + gaps: []protos.GapWithBlocks{ { - bounds: v1.NewBounds(0, 10), + Bounds: v1.NewBounds(0, 10), }, }, }, @@ -173,10 +186,10 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: tsdbID(0), - gaps: []GapWithBlocks{ + gaps: []protos.GapWithBlocks{ { - bounds: v1.NewBounds(0, 10), - blocks: []bloomshipper.BlockRef{genBlockRef(9, 20)}, + Bounds: v1.NewBounds(0, 10), + Blocks: []bloomshipper.BlockRef{genBlockRef(9, 20)}, }, }, }, @@ -196,9 +209,9 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: tsdbID(0), - gaps: []GapWithBlocks{ + gaps: []protos.GapWithBlocks{ { - bounds: v1.NewBounds(0, 8), + Bounds: v1.NewBounds(0, 8), }, }, }, @@ -215,10 +228,10 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: tsdbID(0), - gaps: []GapWithBlocks{ + gaps: 
[]protos.GapWithBlocks{ { - bounds: v1.NewBounds(0, 8), - blocks: []bloomshipper.BlockRef{genBlockRef(5, 20)}, + Bounds: v1.NewBounds(0, 8), + Blocks: []bloomshipper.BlockRef{genBlockRef(5, 20)}, }, }, }, @@ -241,32 +254,32 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: tsdbID(0), - gaps: []GapWithBlocks{ + gaps: []protos.GapWithBlocks{ // tsdb (id=0) can source chunks from the blocks built from tsdb (id=1) { - bounds: v1.NewBounds(3, 5), - blocks: []bloomshipper.BlockRef{genBlockRef(3, 5)}, + Bounds: v1.NewBounds(3, 5), + Blocks: []bloomshipper.BlockRef{genBlockRef(3, 5)}, }, { - bounds: v1.NewBounds(9, 10), - blocks: []bloomshipper.BlockRef{genBlockRef(8, 10)}, + Bounds: v1.NewBounds(9, 10), + Blocks: []bloomshipper.BlockRef{genBlockRef(8, 10)}, }, }, }, // tsdb (id=1) can source chunks from the blocks built from tsdb (id=0) { tsdb: tsdbID(1), - gaps: []GapWithBlocks{ + gaps: []protos.GapWithBlocks{ { - bounds: v1.NewBounds(0, 2), - blocks: []bloomshipper.BlockRef{ + Bounds: v1.NewBounds(0, 2), + Blocks: []bloomshipper.BlockRef{ genBlockRef(0, 1), genBlockRef(1, 2), }, }, { - bounds: v1.NewBounds(6, 7), - blocks: []bloomshipper.BlockRef{genBlockRef(6, 8)}, + Bounds: v1.NewBounds(6, 7), + Blocks: []bloomshipper.BlockRef{genBlockRef(6, 8)}, }, }, }, @@ -289,10 +302,10 @@ func Test_blockPlansForGaps(t *testing.T) { exp: []blockPlan{ { tsdb: tsdbID(0), - gaps: []GapWithBlocks{ + gaps: []protos.GapWithBlocks{ { - bounds: v1.NewBounds(0, 10), - blocks: []bloomshipper.BlockRef{ + Bounds: v1.NewBounds(0, 10), + Blocks: []bloomshipper.BlockRef{ genBlockRef(1, 4), genBlockRef(5, 10), genBlockRef(9, 20), @@ -319,3 +332,158 @@ func Test_blockPlansForGaps(t *testing.T) { }) } } + +func Test_BuilderLoop(t *testing.T) { + const ( + nTasks = 100 + nBuilders = 10 + ) + logger := log.NewNopLogger() + + limits := &fakeLimits{} + cfg := Config{ + PlanningInterval: 1 * time.Hour, + MaxQueuedTasksPerTenant: 10000, + } + schemaCfg := config.SchemaConfig{ + Configs: []config.PeriodConfig{ + { + From: parseDayTime("2023-09-01"), + IndexTables: config.IndexPeriodicTableConfig{ + PeriodicTableConfig: config.PeriodicTableConfig{ + Prefix: "index_", + Period: 24 * time.Hour, + }, + }, + IndexType: types.TSDBType, + ObjectType: types.StorageTypeFileSystem, + Schema: "v13", + RowShards: 16, + }, + }, + } + storageCfg := storage.Config{ + BloomShipperConfig: bloomshipperconfig.Config{ + WorkingDirectory: []string{t.TempDir()}, + DownloadParallelism: 1, + BlocksCache: bloomshipperconfig.BlocksCacheConfig{ + SoftLimit: flagext.Bytes(10 << 20), + HardLimit: flagext.Bytes(20 << 20), + TTL: time.Hour, + }, + }, + FSConfig: local.FSConfig{ + Directory: t.TempDir(), + }, + } + + // Create planner + planner, err := New(cfg, limits, schemaCfg, storageCfg, storage.NewClientMetrics(), nil, logger, prometheus.DefaultRegisterer) + require.NoError(t, err) + + // Start planner + err = services.StartAndAwaitRunning(context.Background(), planner) + require.NoError(t, err) + t.Cleanup(func() { + err := services.StopAndAwaitTerminated(context.Background(), planner) + require.NoError(t, err) + }) + + // Enqueue tasks + for i := 0; i < nTasks; i++ { + task := NewTask( + context.Background(), time.Now(), + protos.NewTask("fakeTable", "fakeTenant", v1.NewBounds(0, 10), tsdbID(1), nil), + ) + + err = planner.enqueueTask(task) + require.NoError(t, err) + } + + // All tasks should be pending + require.Equal(t, nTasks, planner.totalPendingTasks()) + + // Create builders and call planner.BuilderLoop + builders := 
make([]*fakeBuilder, 0, nBuilders) + for i := 0; i < nBuilders; i++ { + builder := newMockBuilder(fmt.Sprintf("builder-%d", i)) + builders = append(builders, builder) + + go func() { + // We ignore the error since when the planner is stopped, + // the loop will return an error (queue closed) + _ = planner.BuilderLoop(builder) + }() + } + + // Eventually, all tasks should be sent to builders + require.Eventually(t, func() bool { + var receivedTasks int + for _, builder := range builders { + receivedTasks += len(builder.ReceivedTasks()) + } + return receivedTasks == nTasks + }, 15*time.Second, 10*time.Millisecond) + + // Finally, the queue should be empty + require.Equal(t, 0, planner.totalPendingTasks()) +} + +type fakeBuilder struct { + id string + tasks []*protos.Task + grpc.ServerStream +} + +func newMockBuilder(id string) *fakeBuilder { + return &fakeBuilder{id: id} +} + +func (f *fakeBuilder) ReceivedTasks() []*protos.Task { + return f.tasks +} + +func (f *fakeBuilder) Context() context.Context { + return context.Background() +} + +func (f *fakeBuilder) Send(req *protos.PlannerToBuilder) error { + task, err := protos.FromProtoTask(req.Task) + if err != nil { + return err + } + + f.tasks = append(f.tasks, task) + return nil +} + +func (f *fakeBuilder) Recv() (*protos.BuilderToPlanner, error) { + return &protos.BuilderToPlanner{ + BuilderID: f.id, + }, nil +} + +type fakeLimits struct { +} + +func (f *fakeLimits) BloomCreationEnabled(_ string) bool { + return true +} + +func (f *fakeLimits) BloomSplitSeriesKeyspaceBy(_ string) int { + return 1 +} + +func (f *fakeLimits) BloomBuildMaxBuilders(_ string) int { + return 0 +} + +func parseDayTime(s string) config.DayTime { + t, err := time.Parse("2006-01-02", s) + if err != nil { + panic(err) + } + return config.DayTime{ + Time: model.TimeFromUnix(t.Unix()), + } +} diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go index bff459fe17643..84c6d7617eafe 100644 --- a/pkg/bloombuild/planner/task.go +++ b/pkg/bloombuild/planner/task.go @@ -4,26 +4,21 @@ import ( "context" "time" - v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" + "github.com/grafana/loki/v3/pkg/bloombuild/protos" ) -// TODO: Extract this definiton to a proto file at pkg/bloombuild/protos/protos.proto - -type GapWithBlocks struct { - bounds v1.FingerprintBounds - blocks []bloomshipper.BlockRef -} - type Task struct { - table string - tenant string - OwnershipBounds v1.FingerprintBounds - tsdb tsdb.SingleTenantTSDBIdentifier - gaps []GapWithBlocks + *protos.Task // Tracking queueTime time.Time ctx context.Context } + +func NewTask(ctx context.Context, queueTime time.Time, task *protos.Task) *Task { + return &Task{ + Task: task, + ctx: ctx, + queueTime: queueTime, + } +} diff --git a/pkg/bloombuild/protos/compat.go b/pkg/bloombuild/protos/compat.go new file mode 100644 index 0000000000000..b1ae7cccdbab1 --- /dev/null +++ b/pkg/bloombuild/protos/compat.go @@ -0,0 +1,113 @@ +package protos + +import ( + "fmt" + + "github.com/google/uuid" + + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" +) + +type GapWithBlocks struct { + Bounds v1.FingerprintBounds + Blocks []bloomshipper.BlockRef +} + +type Task struct { + ID string + + Table string + Tenant string + 
OwnershipBounds v1.FingerprintBounds + TSDB tsdb.SingleTenantTSDBIdentifier + Gaps []GapWithBlocks +} + +func NewTask(table, tenant string, bounds v1.FingerprintBounds, tsdb tsdb.SingleTenantTSDBIdentifier, gaps []GapWithBlocks) *Task { + return &Task{ + ID: uuid.NewString(), + + Table: table, + Tenant: tenant, + OwnershipBounds: bounds, + TSDB: tsdb, + Gaps: gaps, + } +} + +// TODO: Use it in the builder to parse the task +func FromProtoTask(task *ProtoTask) (*Task, error) { + if task == nil { + return nil, nil + } + + tsdbRef, ok := tsdb.ParseSingleTenantTSDBPath(task.Tsdb) + if !ok { + return nil, fmt.Errorf("failed to parse tsdb path %s", task.Tsdb) + } + + gaps := make([]GapWithBlocks, 0, len(task.Gaps)) + for _, gap := range task.Gaps { + bounds := v1.FingerprintBounds{ + Min: gap.Bounds.Min, + Max: gap.Bounds.Max, + } + blocks := make([]bloomshipper.BlockRef, 0, len(gap.BlockRef)) + for _, block := range gap.BlockRef { + b, err := bloomshipper.BlockRefFromKey(block) + if err != nil { + return nil, fmt.Errorf("failed to parse block ref %s: %w", block, err) + } + + blocks = append(blocks, b) + } + gaps = append(gaps, GapWithBlocks{ + Bounds: bounds, + Blocks: blocks, + }) + } + + return &Task{ + ID: task.Id, + Table: task.Table, + Tenant: task.Tenant, + OwnershipBounds: v1.FingerprintBounds{ + Min: task.Bounds.Min, + Max: task.Bounds.Max, + }, + TSDB: tsdbRef, + Gaps: gaps, + }, nil +} + +func (t *Task) ToProtoTask() *ProtoTask { + protoGaps := make([]*ProtoGapWithBlocks, 0, len(t.Gaps)) + for _, gap := range t.Gaps { + blockRefs := make([]string, 0, len(gap.Blocks)) + for _, block := range gap.Blocks { + blockRefs = append(blockRefs, block.String()) + } + + protoGaps = append(protoGaps, &ProtoGapWithBlocks{ + Bounds: ProtoFingerprintBounds{ + Min: gap.Bounds.Min, + Max: gap.Bounds.Max, + }, + BlockRef: blockRefs, + }) + } + + return &ProtoTask{ + Id: t.ID, + Table: t.Table, + Tenant: t.Tenant, + Bounds: ProtoFingerprintBounds{ + Min: t.OwnershipBounds.Min, + Max: t.OwnershipBounds.Max, + }, + Tsdb: t.TSDB.Path(), + Gaps: protoGaps, + } +} diff --git a/pkg/bloombuild/protos/service.pb.go b/pkg/bloombuild/protos/service.pb.go new file mode 100644 index 0000000000000..91684dd90ef8e --- /dev/null +++ b/pkg/bloombuild/protos/service.pb.go @@ -0,0 +1,1175 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/bloombuild/protos/service.proto + +package protos + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type BuilderToPlanner struct { + BuilderID string `protobuf:"bytes,1,opt,name=builderID,proto3" json:"builderID,omitempty"` +} + +func (m *BuilderToPlanner) Reset() { *m = BuilderToPlanner{} } +func (*BuilderToPlanner) ProtoMessage() {} +func (*BuilderToPlanner) Descriptor() ([]byte, []int) { + return fileDescriptor_89de33e08b859356, []int{0} +} +func (m *BuilderToPlanner) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuilderToPlanner) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuilderToPlanner.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BuilderToPlanner) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuilderToPlanner.Merge(m, src) +} +func (m *BuilderToPlanner) XXX_Size() int { + return m.Size() +} +func (m *BuilderToPlanner) XXX_DiscardUnknown() { + xxx_messageInfo_BuilderToPlanner.DiscardUnknown(m) +} + +var xxx_messageInfo_BuilderToPlanner proto.InternalMessageInfo + +func (m *BuilderToPlanner) GetBuilderID() string { + if m != nil { + return m.BuilderID + } + return "" +} + +type PlannerToBuilder struct { + Task *ProtoTask `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *PlannerToBuilder) Reset() { *m = PlannerToBuilder{} } +func (*PlannerToBuilder) ProtoMessage() {} +func (*PlannerToBuilder) Descriptor() ([]byte, []int) { + return fileDescriptor_89de33e08b859356, []int{1} +} +func (m *PlannerToBuilder) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlannerToBuilder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PlannerToBuilder.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PlannerToBuilder) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlannerToBuilder.Merge(m, src) +} +func (m *PlannerToBuilder) XXX_Size() int { + return m.Size() +} +func (m *PlannerToBuilder) XXX_DiscardUnknown() { + xxx_messageInfo_PlannerToBuilder.DiscardUnknown(m) +} + +var xxx_messageInfo_PlannerToBuilder proto.InternalMessageInfo + +func (m *PlannerToBuilder) GetTask() *ProtoTask { + if m != nil { + return m.Task + } + return nil +} + +type NotifyBuilderShutdownRequest struct { + BuilderID string `protobuf:"bytes,1,opt,name=builderID,proto3" json:"builderID,omitempty"` +} + +func (m *NotifyBuilderShutdownRequest) Reset() { *m = NotifyBuilderShutdownRequest{} } +func (*NotifyBuilderShutdownRequest) ProtoMessage() {} +func (*NotifyBuilderShutdownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_89de33e08b859356, []int{2} +} +func (m *NotifyBuilderShutdownRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NotifyBuilderShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NotifyBuilderShutdownRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NotifyBuilderShutdownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotifyBuilderShutdownRequest.Merge(m, src) +} +func (m *NotifyBuilderShutdownRequest) XXX_Size() int { + return m.Size() +} +func (m 
*NotifyBuilderShutdownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NotifyBuilderShutdownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NotifyBuilderShutdownRequest proto.InternalMessageInfo + +func (m *NotifyBuilderShutdownRequest) GetBuilderID() string { + if m != nil { + return m.BuilderID + } + return "" +} + +type NotifyBuilderShutdownResponse struct { +} + +func (m *NotifyBuilderShutdownResponse) Reset() { *m = NotifyBuilderShutdownResponse{} } +func (*NotifyBuilderShutdownResponse) ProtoMessage() {} +func (*NotifyBuilderShutdownResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_89de33e08b859356, []int{3} +} +func (m *NotifyBuilderShutdownResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NotifyBuilderShutdownResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NotifyBuilderShutdownResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NotifyBuilderShutdownResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotifyBuilderShutdownResponse.Merge(m, src) +} +func (m *NotifyBuilderShutdownResponse) XXX_Size() int { + return m.Size() +} +func (m *NotifyBuilderShutdownResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NotifyBuilderShutdownResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NotifyBuilderShutdownResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BuilderToPlanner)(nil), "protos.BuilderToPlanner") + proto.RegisterType((*PlannerToBuilder)(nil), "protos.PlannerToBuilder") + proto.RegisterType((*NotifyBuilderShutdownRequest)(nil), "protos.NotifyBuilderShutdownRequest") + proto.RegisterType((*NotifyBuilderShutdownResponse)(nil), "protos.NotifyBuilderShutdownResponse") +} + +func init() { + proto.RegisterFile("pkg/bloombuild/protos/service.proto", fileDescriptor_89de33e08b859356) +} + +var fileDescriptor_89de33e08b859356 = []byte{ + // 323 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2e, 0xc8, 0x4e, 0xd7, + 0x4f, 0xca, 0xc9, 0xcf, 0xcf, 0x4d, 0x2a, 0xcd, 0xcc, 0x49, 0xd1, 0x2f, 0x28, 0xca, 0x2f, 0xc9, + 0x2f, 0xd6, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x03, 0x73, 0x85, 0xd8, 0x20, 0xa2, + 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xb6, 0x3e, 0x88, 0x05, 0x91, 0x95, 0x52, 0xc4, 0x6e, + 0x44, 0x49, 0x65, 0x41, 0x6a, 0x31, 0x44, 0x89, 0x92, 0x01, 0x97, 0x80, 0x13, 0x48, 0x2e, 0xb5, + 0x28, 0x24, 0x3f, 0x20, 0x27, 0x31, 0x2f, 0x2f, 0xb5, 0x48, 0x48, 0x86, 0x8b, 0x33, 0x09, 0x22, + 0xe6, 0xe9, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x84, 0x10, 0x50, 0xb2, 0xe4, 0x12, 0x80, + 0x2a, 0x0c, 0xc9, 0x87, 0x6a, 0x15, 0x52, 0xe5, 0x62, 0x29, 0x49, 0x2c, 0xce, 0x06, 0x2b, 0xe6, + 0x36, 0x12, 0x84, 0x98, 0x5d, 0xac, 0x17, 0x00, 0xa2, 0x42, 0x12, 0x8b, 0xb3, 0x83, 0xc0, 0xd2, + 0x4a, 0x36, 0x5c, 0x32, 0x7e, 0xf9, 0x25, 0x99, 0x69, 0x95, 0x50, 0x7d, 0xc1, 0x19, 0xa5, 0x25, + 0x29, 0xf9, 0xe5, 0x79, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x04, 0x2c, 0x96, 0xe7, 0x92, + 0xc5, 0xa1, 0xbb, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0xd5, 0xe8, 0x08, 0x23, 0x97, 0x20, 0xd4, 0x69, + 0x6e, 0xf9, 0x45, 0x30, 0xb7, 0xb9, 0x73, 0x71, 0x43, 0x99, 0x3e, 0xf9, 0xf9, 0x05, 0x42, 0x12, + 0x30, 0xc7, 0xa1, 0x7b, 0x5b, 0x0a, 0x2e, 0x83, 0xee, 0x3d, 0x25, 0x06, 0x0d, 0x46, 0x03, 0x46, + 0xa1, 0x34, 0x2e, 0x51, 0xac, 0xf6, 0x0b, 0xa9, 0xc0, 0x34, 0xe2, 0xf3, 0x9c, 0x94, 0x2a, 
0x01, + 0x55, 0x10, 0x4f, 0x28, 0x31, 0x38, 0xd9, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, + 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, + 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, + 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x82, + 0xa6, 0x84, 0x24, 0x08, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x28, 0x86, 0x3f, 0xfe, 0x3f, + 0x02, 0x00, 0x00, +} + +func (this *BuilderToPlanner) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BuilderToPlanner) + if !ok { + that2, ok := that.(BuilderToPlanner) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BuilderID != that1.BuilderID { + return false + } + return true +} +func (this *PlannerToBuilder) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PlannerToBuilder) + if !ok { + that2, ok := that.(PlannerToBuilder) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *NotifyBuilderShutdownRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NotifyBuilderShutdownRequest) + if !ok { + that2, ok := that.(NotifyBuilderShutdownRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BuilderID != that1.BuilderID { + return false + } + return true +} +func (this *NotifyBuilderShutdownResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NotifyBuilderShutdownResponse) + if !ok { + that2, ok := that.(NotifyBuilderShutdownResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *BuilderToPlanner) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&protos.BuilderToPlanner{") + s = append(s, "BuilderID: "+fmt.Sprintf("%#v", this.BuilderID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PlannerToBuilder) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&protos.PlannerToBuilder{") + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NotifyBuilderShutdownRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&protos.NotifyBuilderShutdownRequest{") + s = append(s, "BuilderID: "+fmt.Sprintf("%#v", this.BuilderID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NotifyBuilderShutdownResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&protos.NotifyBuilderShutdownResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringService(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return 
fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// PlannerForBuilderClient is the client API for PlannerForBuilder service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PlannerForBuilderClient interface { + BuilderLoop(ctx context.Context, opts ...grpc.CallOption) (PlannerForBuilder_BuilderLoopClient, error) + NotifyBuilderShutdown(ctx context.Context, in *NotifyBuilderShutdownRequest, opts ...grpc.CallOption) (*NotifyBuilderShutdownResponse, error) +} + +type plannerForBuilderClient struct { + cc *grpc.ClientConn +} + +func NewPlannerForBuilderClient(cc *grpc.ClientConn) PlannerForBuilderClient { + return &plannerForBuilderClient{cc} +} + +func (c *plannerForBuilderClient) BuilderLoop(ctx context.Context, opts ...grpc.CallOption) (PlannerForBuilder_BuilderLoopClient, error) { + stream, err := c.cc.NewStream(ctx, &_PlannerForBuilder_serviceDesc.Streams[0], "/protos.PlannerForBuilder/BuilderLoop", opts...) + if err != nil { + return nil, err + } + x := &plannerForBuilderBuilderLoopClient{stream} + return x, nil +} + +type PlannerForBuilder_BuilderLoopClient interface { + Send(*BuilderToPlanner) error + Recv() (*PlannerToBuilder, error) + grpc.ClientStream +} + +type plannerForBuilderBuilderLoopClient struct { + grpc.ClientStream +} + +func (x *plannerForBuilderBuilderLoopClient) Send(m *BuilderToPlanner) error { + return x.ClientStream.SendMsg(m) +} + +func (x *plannerForBuilderBuilderLoopClient) Recv() (*PlannerToBuilder, error) { + m := new(PlannerToBuilder) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *plannerForBuilderClient) NotifyBuilderShutdown(ctx context.Context, in *NotifyBuilderShutdownRequest, opts ...grpc.CallOption) (*NotifyBuilderShutdownResponse, error) { + out := new(NotifyBuilderShutdownResponse) + err := c.cc.Invoke(ctx, "/protos.PlannerForBuilder/NotifyBuilderShutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PlannerForBuilderServer is the server API for PlannerForBuilder service. +type PlannerForBuilderServer interface { + BuilderLoop(PlannerForBuilder_BuilderLoopServer) error + NotifyBuilderShutdown(context.Context, *NotifyBuilderShutdownRequest) (*NotifyBuilderShutdownResponse, error) +} + +// UnimplementedPlannerForBuilderServer can be embedded to have forward compatible implementations. 
+type UnimplementedPlannerForBuilderServer struct { +} + +func (*UnimplementedPlannerForBuilderServer) BuilderLoop(srv PlannerForBuilder_BuilderLoopServer) error { + return status.Errorf(codes.Unimplemented, "method BuilderLoop not implemented") +} +func (*UnimplementedPlannerForBuilderServer) NotifyBuilderShutdown(ctx context.Context, req *NotifyBuilderShutdownRequest) (*NotifyBuilderShutdownResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NotifyBuilderShutdown not implemented") +} + +func RegisterPlannerForBuilderServer(s *grpc.Server, srv PlannerForBuilderServer) { + s.RegisterService(&_PlannerForBuilder_serviceDesc, srv) +} + +func _PlannerForBuilder_BuilderLoop_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PlannerForBuilderServer).BuilderLoop(&plannerForBuilderBuilderLoopServer{stream}) +} + +type PlannerForBuilder_BuilderLoopServer interface { + Send(*PlannerToBuilder) error + Recv() (*BuilderToPlanner, error) + grpc.ServerStream +} + +type plannerForBuilderBuilderLoopServer struct { + grpc.ServerStream +} + +func (x *plannerForBuilderBuilderLoopServer) Send(m *PlannerToBuilder) error { + return x.ServerStream.SendMsg(m) +} + +func (x *plannerForBuilderBuilderLoopServer) Recv() (*BuilderToPlanner, error) { + m := new(BuilderToPlanner) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _PlannerForBuilder_NotifyBuilderShutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NotifyBuilderShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PlannerForBuilderServer).NotifyBuilderShutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/protos.PlannerForBuilder/NotifyBuilderShutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PlannerForBuilderServer).NotifyBuilderShutdown(ctx, req.(*NotifyBuilderShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _PlannerForBuilder_serviceDesc = grpc.ServiceDesc{ + ServiceName: "protos.PlannerForBuilder", + HandlerType: (*PlannerForBuilderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NotifyBuilderShutdown", + Handler: _PlannerForBuilder_NotifyBuilderShutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "BuilderLoop", + Handler: _PlannerForBuilder_BuilderLoop_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "pkg/bloombuild/protos/service.proto", +} + +func (m *BuilderToPlanner) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuilderToPlanner) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuilderToPlanner) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BuilderID) > 0 { + i -= len(m.BuilderID) + copy(dAtA[i:], m.BuilderID) + i = encodeVarintService(dAtA, i, uint64(len(m.BuilderID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PlannerToBuilder) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *PlannerToBuilder) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlannerToBuilder) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NotifyBuilderShutdownRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NotifyBuilderShutdownRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NotifyBuilderShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BuilderID) > 0 { + i -= len(m.BuilderID) + copy(dAtA[i:], m.BuilderID) + i = encodeVarintService(dAtA, i, uint64(len(m.BuilderID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NotifyBuilderShutdownResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NotifyBuilderShutdownResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NotifyBuilderShutdownResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BuilderToPlanner) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BuilderID) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *PlannerToBuilder) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *NotifyBuilderShutdownRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BuilderID) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *NotifyBuilderShutdownResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BuilderToPlanner) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuilderToPlanner{`, + `BuilderID:` + fmt.Sprintf("%v", this.BuilderID) + `,`, + `}`, + }, "") + return s +} +func (this *PlannerToBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlannerToBuilder{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "ProtoTask", "ProtoTask", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NotifyBuilderShutdownRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&NotifyBuilderShutdownRequest{`, + `BuilderID:` + fmt.Sprintf("%v", this.BuilderID) + `,`, + `}`, + }, "") + return s +} +func (this *NotifyBuilderShutdownResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NotifyBuilderShutdownResponse{`, + `}`, + }, "") + return s +} +func valueToStringService(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BuilderToPlanner) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuilderToPlanner: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuilderToPlanner: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuilderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuilderID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlannerToBuilder) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlannerToBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlannerToBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Task == nil { + m.Task = &ProtoTask{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NotifyBuilderShutdownRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NotifyBuilderShutdownRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NotifyBuilderShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuilderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuilderID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NotifyBuilderShutdownResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NotifyBuilderShutdownResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NotifyBuilderShutdownResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthService + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthService + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipService(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthService + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowService = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/bloombuild/protos/service.proto b/pkg/bloombuild/protos/service.proto new file mode 100644 index 0000000000000..e061684c41bea --- /dev/null +++ b/pkg/bloombuild/protos/service.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package protos; + +import "gogoproto/gogo.proto"; +import "pkg/bloombuild/protos/types.proto"; + +option go_package = "protos"; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service PlannerForBuilder { + rpc BuilderLoop(stream BuilderToPlanner) returns (stream PlannerToBuilder) {} + + rpc NotifyBuilderShutdown(NotifyBuilderShutdownRequest) returns (NotifyBuilderShutdownResponse) {} +} + +message BuilderToPlanner { + string builderID = 1; +} + +message PlannerToBuilder { + ProtoTask task = 1; +} + +message NotifyBuilderShutdownRequest { + string builderID = 1; +} + +message NotifyBuilderShutdownResponse { + // empty: just to acknowledge the request +} diff --git a/pkg/bloombuild/protos/types.pb.go b/pkg/bloombuild/protos/types.pb.go new file mode 100644 index 0000000000000..5d3b9bcb729a3 --- /dev/null +++ b/pkg/bloombuild/protos/types.pb.go @@ -0,0 +1,1255 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/bloombuild/protos/types.proto + +package protos + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_prometheus_common_model "github.com/prometheus/common/model" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// FPBounds is identical to the definition in `pkg/storage/bloom/v1/bounds.FingerprintBounds` +// which ensures we can cast between them without allocations. +// TODO(salvacorts): Reuse from `pkg/logproto/indexgateway.proto` +type ProtoFingerprintBounds struct { + Min github_com_prometheus_common_model.Fingerprint `protobuf:"varint,1,opt,name=min,proto3,casttype=github.com/prometheus/common/model.Fingerprint" json:"min"` + Max github_com_prometheus_common_model.Fingerprint `protobuf:"varint,2,opt,name=max,proto3,casttype=github.com/prometheus/common/model.Fingerprint" json:"max"` +} + +func (m *ProtoFingerprintBounds) Reset() { *m = ProtoFingerprintBounds{} } +func (*ProtoFingerprintBounds) ProtoMessage() {} +func (*ProtoFingerprintBounds) Descriptor() ([]byte, []int) { + return fileDescriptor_5325fb0610e1e9ae, []int{0} +} +func (m *ProtoFingerprintBounds) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProtoFingerprintBounds) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProtoFingerprintBounds.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProtoFingerprintBounds) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtoFingerprintBounds.Merge(m, src) +} +func (m *ProtoFingerprintBounds) XXX_Size() int { + return m.Size() +} +func (m *ProtoFingerprintBounds) XXX_DiscardUnknown() { + xxx_messageInfo_ProtoFingerprintBounds.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtoFingerprintBounds proto.InternalMessageInfo + +func (m *ProtoFingerprintBounds) GetMin() github_com_prometheus_common_model.Fingerprint { + if m != nil { + return m.Min + } + return 0 +} + +func (m *ProtoFingerprintBounds) GetMax() github_com_prometheus_common_model.Fingerprint { + if m != nil { + return m.Max + } + return 0 +} + +type ProtoGapWithBlocks struct { + Bounds ProtoFingerprintBounds `protobuf:"bytes,1,opt,name=bounds,proto3" json:"bounds"` + BlockRef []string `protobuf:"bytes,2,rep,name=blockRef,proto3" json:"blockRef,omitempty"` +} + +func (m *ProtoGapWithBlocks) Reset() { *m = ProtoGapWithBlocks{} } +func (*ProtoGapWithBlocks) ProtoMessage() {} +func (*ProtoGapWithBlocks) Descriptor() ([]byte, []int) { + return fileDescriptor_5325fb0610e1e9ae, []int{1} +} +func (m *ProtoGapWithBlocks) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProtoGapWithBlocks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProtoGapWithBlocks.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProtoGapWithBlocks) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtoGapWithBlocks.Merge(m, src) +} +func (m *ProtoGapWithBlocks) XXX_Size() int { + return m.Size() +} +func (m *ProtoGapWithBlocks) XXX_DiscardUnknown() { + xxx_messageInfo_ProtoGapWithBlocks.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtoGapWithBlocks proto.InternalMessageInfo + +func (m 
*ProtoGapWithBlocks) GetBounds() ProtoFingerprintBounds { + if m != nil { + return m.Bounds + } + return ProtoFingerprintBounds{} +} + +func (m *ProtoGapWithBlocks) GetBlockRef() []string { + if m != nil { + return m.BlockRef + } + return nil +} + +// TODO: Define BlockRef and SingleTenantTSDBIdentifier as messages so we can use them right away +// +// instead of unmarshaling them from strings or doing unsafe casts. +type ProtoTask struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"` + Tenant string `protobuf:"bytes,3,opt,name=tenant,proto3" json:"tenant,omitempty"` + Bounds ProtoFingerprintBounds `protobuf:"bytes,4,opt,name=bounds,proto3" json:"bounds"` + Tsdb string `protobuf:"bytes,5,opt,name=tsdb,proto3" json:"tsdb,omitempty"` + Gaps []*ProtoGapWithBlocks `protobuf:"bytes,6,rep,name=gaps,proto3" json:"gaps,omitempty"` +} + +func (m *ProtoTask) Reset() { *m = ProtoTask{} } +func (*ProtoTask) ProtoMessage() {} +func (*ProtoTask) Descriptor() ([]byte, []int) { + return fileDescriptor_5325fb0610e1e9ae, []int{2} +} +func (m *ProtoTask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProtoTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProtoTask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProtoTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtoTask.Merge(m, src) +} +func (m *ProtoTask) XXX_Size() int { + return m.Size() +} +func (m *ProtoTask) XXX_DiscardUnknown() { + xxx_messageInfo_ProtoTask.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtoTask proto.InternalMessageInfo + +func (m *ProtoTask) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ProtoTask) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *ProtoTask) GetTenant() string { + if m != nil { + return m.Tenant + } + return "" +} + +func (m *ProtoTask) GetBounds() ProtoFingerprintBounds { + if m != nil { + return m.Bounds + } + return ProtoFingerprintBounds{} +} + +func (m *ProtoTask) GetTsdb() string { + if m != nil { + return m.Tsdb + } + return "" +} + +func (m *ProtoTask) GetGaps() []*ProtoGapWithBlocks { + if m != nil { + return m.Gaps + } + return nil +} + +func init() { + proto.RegisterType((*ProtoFingerprintBounds)(nil), "protos.ProtoFingerprintBounds") + proto.RegisterType((*ProtoGapWithBlocks)(nil), "protos.ProtoGapWithBlocks") + proto.RegisterType((*ProtoTask)(nil), "protos.ProtoTask") +} + +func init() { proto.RegisterFile("pkg/bloombuild/protos/types.proto", fileDescriptor_5325fb0610e1e9ae) } + +var fileDescriptor_5325fb0610e1e9ae = []byte{ + // 393 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0x31, 0xeb, 0x9b, 0x40, + 0x18, 0xc6, 0x3d, 0xf5, 0x2f, 0xf5, 0x02, 0x19, 0x8e, 0x10, 0x24, 0xc3, 0x99, 0x66, 0xca, 0xa4, + 0x90, 0x4e, 0x85, 0x4e, 0x0e, 0xe9, 0xd0, 0xa5, 0x48, 0xa1, 0xd0, 0xed, 0x2e, 0x5e, 0xcd, 0x11, + 0xf5, 0x24, 0x77, 0x82, 0xdd, 0xfa, 0x11, 0xfa, 0x31, 0x3a, 0xf7, 0x53, 0x64, 0xcc, 0x52, 0xc8, + 0x24, 0x8d, 0x59, 0x4a, 0xa6, 0xcc, 0x9d, 0x8a, 0xa7, 0x94, 0x04, 0x3a, 0xb5, 0xd3, 0xfb, 0x3c, + 0xaf, 0xbe, 0xbf, 0xf7, 0x79, 0x45, 0xf8, 0xbc, 0xdc, 0xa5, 0x21, 0xcd, 0x84, 0xc8, 0x69, 0xc5, + 0xb3, 0x24, 0x2c, 0xf7, 0x42, 0x09, 0x19, 0xaa, 0x4f, 0x25, 0x93, 0x81, 
0x36, 0xc8, 0xe9, 0x7b, + 0xb3, 0x49, 0x2a, 0x52, 0xa1, 0x75, 0xd8, 0xa9, 0xfe, 0xe9, 0xe2, 0x1b, 0x80, 0xd3, 0xb7, 0x9d, + 0x5a, 0xf3, 0x22, 0x65, 0xfb, 0x72, 0xcf, 0x0b, 0x15, 0x89, 0xaa, 0x48, 0x24, 0x7a, 0x03, 0xad, + 0x9c, 0x17, 0x1e, 0x98, 0x83, 0xa5, 0x1d, 0xbd, 0xbc, 0x36, 0x7e, 0x67, 0x7f, 0x35, 0x7e, 0x90, + 0x72, 0xb5, 0xad, 0x68, 0xb0, 0x11, 0x79, 0xb7, 0x2f, 0x67, 0x6a, 0xcb, 0x2a, 0x19, 0x6e, 0x44, + 0x9e, 0x8b, 0x22, 0xcc, 0x45, 0xc2, 0xb2, 0xe0, 0x8e, 0x16, 0x77, 0x63, 0x1a, 0x46, 0x6a, 0xcf, + 0xbc, 0x83, 0x91, 0xfa, 0x9f, 0x60, 0xa4, 0x5e, 0xd4, 0x10, 0xe9, 0xcc, 0xaf, 0x49, 0xf9, 0x9e, + 0xab, 0x6d, 0x94, 0x89, 0xcd, 0x4e, 0xa2, 0x35, 0x74, 0xa8, 0x4e, 0xae, 0x23, 0x8f, 0x56, 0xb8, + 0x3f, 0x51, 0x06, 0x7f, 0xbf, 0x2f, 0x1a, 0x1f, 0x1a, 0xdf, 0xb8, 0x36, 0xfe, 0x30, 0x15, 0x0f, + 0x15, 0xcd, 0xe0, 0x33, 0xda, 0x11, 0x63, 0xf6, 0xd1, 0x33, 0xe7, 0xd6, 0xd2, 0x8d, 0xff, 0xf8, + 0xc5, 0x77, 0x00, 0x5d, 0x8d, 0x7b, 0x47, 0xe4, 0x0e, 0x8d, 0xa1, 0xc9, 0x13, 0xbd, 0xcd, 0x8d, + 0x4d, 0x9e, 0xa0, 0x09, 0x7c, 0x52, 0x84, 0x66, 0x4c, 0x9f, 0xe9, 0xc6, 0xbd, 0x41, 0x53, 0xe8, + 0x28, 0x56, 0x90, 0x42, 0x79, 0x96, 0x6e, 0x0f, 0xee, 0x2e, 0xaf, 0xfd, 0x5f, 0x79, 0x11, 0xb4, + 0x95, 0x4c, 0xa8, 0xf7, 0xa4, 0xe9, 0x5a, 0xa3, 0x00, 0xda, 0x29, 0x29, 0xa5, 0xe7, 0xcc, 0xad, + 0xe5, 0x68, 0x35, 0x7b, 0x20, 0x3f, 0x7c, 0xb5, 0x58, 0xbf, 0x17, 0xbd, 0x3a, 0x9e, 0xb1, 0x71, + 0x3a, 0x63, 0xe3, 0x76, 0xc6, 0xe0, 0x73, 0x8b, 0xc1, 0xd7, 0x16, 0x83, 0x43, 0x8b, 0xc1, 0xb1, + 0xc5, 0xe0, 0x47, 0x8b, 0xc1, 0xcf, 0x16, 0x1b, 0xb7, 0x16, 0x83, 0x2f, 0x17, 0x6c, 0x1c, 0x2f, + 0xd8, 0x38, 0x5d, 0xb0, 0xf1, 0x61, 0xf8, 0xb5, 0x68, 0x5f, 0x5f, 0xfc, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x57, 0x05, 0xc7, 0x5d, 0x8e, 0x02, 0x00, 0x00, +} + +func (this *ProtoFingerprintBounds) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProtoFingerprintBounds) + if !ok { + that2, ok := that.(ProtoFingerprintBounds) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Min != that1.Min { + return false + } + if this.Max != that1.Max { + return false + } + return true +} +func (this *ProtoGapWithBlocks) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProtoGapWithBlocks) + if !ok { + that2, ok := that.(ProtoGapWithBlocks) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Bounds.Equal(&that1.Bounds) { + return false + } + if len(this.BlockRef) != len(that1.BlockRef) { + return false + } + for i := range this.BlockRef { + if this.BlockRef[i] != that1.BlockRef[i] { + return false + } + } + return true +} +func (this *ProtoTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProtoTask) + if !ok { + that2, ok := that.(ProtoTask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + if this.Table != that1.Table { + return false + } + if this.Tenant != that1.Tenant { + return false + } + if !this.Bounds.Equal(&that1.Bounds) { + return false + } + if this.Tsdb != that1.Tsdb { + return false + } + if len(this.Gaps) != len(that1.Gaps) { + return false + } + for i := range this.Gaps { + if !this.Gaps[i].Equal(that1.Gaps[i]) { + return 
false + } + } + return true +} +func (this *ProtoFingerprintBounds) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&protos.ProtoFingerprintBounds{") + s = append(s, "Min: "+fmt.Sprintf("%#v", this.Min)+",\n") + s = append(s, "Max: "+fmt.Sprintf("%#v", this.Max)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProtoGapWithBlocks) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&protos.ProtoGapWithBlocks{") + s = append(s, "Bounds: "+strings.Replace(this.Bounds.GoString(), `&`, ``, 1)+",\n") + s = append(s, "BlockRef: "+fmt.Sprintf("%#v", this.BlockRef)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProtoTask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&protos.ProtoTask{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "Table: "+fmt.Sprintf("%#v", this.Table)+",\n") + s = append(s, "Tenant: "+fmt.Sprintf("%#v", this.Tenant)+",\n") + s = append(s, "Bounds: "+strings.Replace(this.Bounds.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Tsdb: "+fmt.Sprintf("%#v", this.Tsdb)+",\n") + if this.Gaps != nil { + s = append(s, "Gaps: "+fmt.Sprintf("%#v", this.Gaps)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTypes(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ProtoFingerprintBounds) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtoFingerprintBounds) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProtoFingerprintBounds) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Max != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + } + if m.Min != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProtoGapWithBlocks) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtoGapWithBlocks) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProtoGapWithBlocks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BlockRef) > 0 { + for iNdEx := len(m.BlockRef) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlockRef[iNdEx]) + copy(dAtA[i:], m.BlockRef[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockRef[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Bounds.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProtoTask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtoTask) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProtoTask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Gaps) > 0 { + for iNdEx := len(m.Gaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Gaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.Tsdb) > 0 { + i -= len(m.Tsdb) + copy(dAtA[i:], m.Tsdb) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tsdb))) + i-- + dAtA[i] = 0x2a + } + { + size, err := m.Bounds.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x1a + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ProtoFingerprintBounds) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Min != 0 { + n += 1 + sovTypes(uint64(m.Min)) + } + if m.Max != 0 { + n += 1 + sovTypes(uint64(m.Max)) + } + return n +} + +func (m *ProtoGapWithBlocks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Bounds.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.BlockRef) > 0 { + for _, s := range m.BlockRef { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ProtoTask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Table) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Tenant) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Bounds.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Tsdb) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Gaps) > 0 { + for _, e := range m.Gaps { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ProtoFingerprintBounds) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProtoFingerprintBounds{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *ProtoGapWithBlocks) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProtoGapWithBlocks{`, + `Bounds:` + strings.Replace(strings.Replace(this.Bounds.String(), "ProtoFingerprintBounds", "ProtoFingerprintBounds", 1), `&`, ``, 1) + `,`, + `BlockRef:` + fmt.Sprintf("%v", this.BlockRef) + `,`, + `}`, + }, "") + return s +} +func (this *ProtoTask) String() string { + if this == nil { + return "nil" + } + repeatedStringForGaps := 
"[]*ProtoGapWithBlocks{" + for _, f := range this.Gaps { + repeatedStringForGaps += strings.Replace(f.String(), "ProtoGapWithBlocks", "ProtoGapWithBlocks", 1) + "," + } + repeatedStringForGaps += "}" + s := strings.Join([]string{`&ProtoTask{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Table:` + fmt.Sprintf("%v", this.Table) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `Bounds:` + strings.Replace(strings.Replace(this.Bounds.String(), "ProtoFingerprintBounds", "ProtoFingerprintBounds", 1), `&`, ``, 1) + `,`, + `Tsdb:` + fmt.Sprintf("%v", this.Tsdb) + `,`, + `Gaps:` + repeatedStringForGaps + `,`, + `}`, + }, "") + return s +} +func valueToStringTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ProtoFingerprintBounds) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtoFingerprintBounds: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtoFingerprintBounds: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= github_com_prometheus_common_model.Fingerprint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= github_com_prometheus_common_model.Fingerprint(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProtoGapWithBlocks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtoGapWithBlocks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtoGapWithBlocks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bounds", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Bounds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockRef = append(m.BlockRef, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProtoTask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtoTask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtoTask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bounds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Bounds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tsdb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tsdb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gaps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gaps = append(m.Gaps, &ProtoGapWithBlocks{}) + if err := m.Gaps[len(m.Gaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType 
:= int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/bloombuild/protos/types.proto b/pkg/bloombuild/protos/types.proto new file mode 100644 index 0000000000000..58ea8ee4679e4 --- /dev/null +++ b/pkg/bloombuild/protos/types.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; + +package protos; + +import "gogoproto/gogo.proto"; + +option go_package = "protos"; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// FPBounds is identical to the definition in `pkg/storage/bloom/v1/bounds.FingerprintBounds` +// which ensures we can cast between them without allocations. +// TODO(salvacorts): Reuse from `pkg/logproto/indexgateway.proto` +message ProtoFingerprintBounds { + uint64 min = 1 [ + (gogoproto.casttype) = "github.com/prometheus/common/model.Fingerprint", + (gogoproto.jsontag) = "min" + ]; + uint64 max = 2 [ + (gogoproto.casttype) = "github.com/prometheus/common/model.Fingerprint", + (gogoproto.jsontag) = "max" + ]; +} + +message ProtoGapWithBlocks { + ProtoFingerprintBounds bounds = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "bounds" + ]; + repeated string blockRef = 2; +} + +// TODO: Define BlockRef and SingleTenantTSDBIdentifier as messages so we can use them right away +// instead of unmarshaling them from strings or doing unsafe casts. 
+message ProtoTask { + string id = 1; + string table = 2; + string tenant = 3; + ProtoFingerprintBounds bounds = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "bounds" + ]; + string tsdb = 5; + repeated ProtoGapWithBlocks gaps = 6; +} From 2171f6409f7157888df9637a635664c67b7ca844 Mon Sep 17 00:00:00 2001 From: benclive Date: Tue, 28 May 2024 09:47:34 +0100 Subject: [PATCH 07/12] fix: Fix panic on requesting out-of-order Pattern samples (#13010) --- pkg/pattern/drain/chunk.go | 8 ++++++++ pkg/pattern/drain/chunk_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/pkg/pattern/drain/chunk.go b/pkg/pattern/drain/chunk.go index 9b1e34e2e3a19..1333299467585 100644 --- a/pkg/pattern/drain/chunk.go +++ b/pkg/pattern/drain/chunk.go @@ -66,6 +66,11 @@ func (c Chunk) ForRange(start, end, step model.Time) []logproto.PatternSample { return c.Samples[i].Timestamp >= end }) } + + if c.Samples[lo].Timestamp > c.Samples[hi-1].Timestamp { + return nil + } + if step == TimeResolution { return c.Samples[lo:hi] } @@ -110,6 +115,9 @@ func (c *Chunks) Add(ts model.Time) { *c = append(*c, newChunk(t)) return } + if ts.Before(last.Samples[len(last.Samples)-1].Timestamp) { + return + } last.Samples = append(last.Samples, logproto.PatternSample{ Timestamp: t, Value: 1, diff --git a/pkg/pattern/drain/chunk_test.go b/pkg/pattern/drain/chunk_test.go index 4863a6629729a..17429da594e19 100644 --- a/pkg/pattern/drain/chunk_test.go +++ b/pkg/pattern/drain/chunk_test.go @@ -21,6 +21,9 @@ func TestAdd(t *testing.T) { cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + TimeResolution + 1) require.Equal(t, 2, len(cks)) require.Equal(t, 1, len(cks[1].Samples)) + cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) - TimeResolution) + require.Equal(t, 2, len(cks)) + require.Equalf(t, 1, len(cks[1].Samples), "Older samples should not be added if they arrive out of order") } func TestIterator(t *testing.T) { @@ -52,6 +55,7 @@ func TestForRange(t *testing.T) { c *Chunk start model.Time end model.Time + step model.Time expected []logproto.PatternSample }{ { @@ -180,6 +184,28 @@ func TestForRange(t *testing.T) { {Timestamp: 4, Value: 10}, }, }, + { + name: "Out-of-order samples generate nil result", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 5, Value: 2}, + {Timestamp: 3, Value: 2}, + }}, + start: 4, + end: 6, + expected: nil, + }, + { + name: "Internally out-of-order samples generate nil result", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 5, Value: 2}, + {Timestamp: 3, Value: 2}, + {Timestamp: 7, Value: 2}, + }}, + start: 2, + end: 6, + expected: nil, + }, } for _, tc := range testCases { From 1432a3e84a7e5df18b8dc0e217121fd78da9e75e Mon Sep 17 00:00:00 2001 From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com> Date: Tue, 28 May 2024 11:44:48 -0400 Subject: [PATCH 08/12] feat: Added video and updated Grafana Agent -> Alloy (#13032) Co-authored-by: J Stickler --- docs/sources/send-data/_index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/send-data/_index.md b/docs/sources/send-data/_index.md index 4fd5f7681ea5b..2064860dbbcdd 100644 --- a/docs/sources/send-data/_index.md +++ b/docs/sources/send-data/_index.md @@ -12,6 +12,8 @@ weight: 500 There are a number of different clients available to send log data to Loki. While all clients can be used simultaneously to cover multiple use cases, which client is initially picked to send logs depends on your use case. 
+{{< youtube id="xtEppndO7F8" >}} + ## Grafana Clients The following clients are developed and supported (for those customers who have purchased a support contract) by Grafana Labs for sending logs to Loki: From d6374bc2ce3041005842edd353a3bb010f467abe Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Tue, 28 May 2024 20:09:08 +0200 Subject: [PATCH 09/12] feat(blooms): Add counter metric for blocks that are not available at query time (#12968) When filtering chunks on the bloom gateway, bloom blocks may not be available and they will be downloaded asynchronously in the background. This new metric `loki_bloom_gateway_blocks_not_available_total` counts the blocks that are not available at query time. Signed-off-by: Christian Haudum --- pkg/bloomgateway/bloomgateway.go | 19 ++++++++++--------- pkg/bloomgateway/metrics.go      | 19 +++++++++++++------ pkg/bloomgateway/processor.go    |  2 +- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index ee0e6f9940fd2..165e2d652473b 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -290,6 +290,13 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk g.activeUsers.UpdateUserTimestamp(tenantID, time.Now()) + var preFilterSeries, preFilterChunks int + + preFilterSeries = len(req.Refs) + for _, series := range req.Refs { + preFilterChunks += len(series.Refs) + } + // Ideally we could use an unbuffered channel here, but since we return the // request on the first error, there can be cases where the request context // is not done yet and the consumeTask() function wants to send to the @@ -316,13 +323,6 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk remaining := len(tasks) - preFilterSeries := len(req.Refs) - var preFilterChunks, postFilterChunks int - - for _, series := range req.Refs { - preFilterChunks += len(series.Refs) - } - combinedRecorder := v1.NewBloomRecorder(ctx, "combined") for remaining > 0 { select { @@ -353,11 +353,12 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk responsesPool.Put(resp) } - postFilterSeries := len(filtered) - + var postFilterSeries, postFilterChunks int + postFilterSeries = len(filtered) for _, group := range filtered { postFilterChunks += len(group.Refs) } + g.metrics.requestedSeries.Observe(float64(preFilterSeries)) g.metrics.filteredSeries.Observe(float64(preFilterSeries - postFilterSeries)) g.metrics.requestedChunks.Observe(float64(preFilterChunks)) diff --git a/pkg/bloomgateway/metrics.go b/pkg/bloomgateway/metrics.go index 0885bc2ae7cb4..5c046d3147c34 100644 --- a/pkg/bloomgateway/metrics.go +++ b/pkg/bloomgateway/metrics.go @@ -116,12 +116,13 @@ func newServerMetrics(registerer prometheus.Registerer, namespace, subsystem str } type workerMetrics struct { - dequeueDuration   *prometheus.HistogramVec - queueDuration     *prometheus.HistogramVec - processDuration   *prometheus.HistogramVec - tasksDequeued     *prometheus.CounterVec - tasksProcessed    *prometheus.CounterVec - blockQueryLatency *prometheus.HistogramVec + dequeueDuration    *prometheus.HistogramVec + queueDuration      *prometheus.HistogramVec + processDuration    *prometheus.HistogramVec + tasksDequeued      *prometheus.CounterVec + tasksProcessed     *prometheus.CounterVec + blocksNotAvailable *prometheus.CounterVec + blockQueryLatency  *prometheus.HistogramVec } func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem string) *workerMetrics { @@ -158,6
+159,12 @@ func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem str Name: "tasks_processed_total", Help: "Total amount of tasks that the worker processed", }, append(labels, "status")), + blocksNotAvailable: r.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "blocks_not_available_total", + Help: "Total amount of blocks that have been skipped because they were not found or not downloaded yet", + }, labels), blockQueryLatency: r.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go index 1e8452ded5d66..6973ad1f565b7 100644 --- a/pkg/bloomgateway/processor.go +++ b/pkg/bloomgateway/processor.go @@ -126,7 +126,7 @@ func (p *processor) processBlocks(ctx context.Context, bqs []*bloomshipper.Close return concurrency.ForEachJob(ctx, len(bqs), p.concurrency, func(ctx context.Context, i int) error { bq := bqs[i] if bq == nil { - // TODO(chaudum): Add metric for skipped blocks + p.metrics.blocksNotAvailable.WithLabelValues(p.id).Inc() return nil } From 1ab9d271c354caf0ba589691e6477fb9a19039f0 Mon Sep 17 00:00:00 2001 From: archimeid <68751170+archimeid@users.noreply.github.com> Date: Tue, 28 May 2024 19:35:28 +0100 Subject: [PATCH 10/12] fix(helm): fix query-frontend and ruler targetPort 'http-metrics' in Service template (#13024) --- production/helm/loki/CHANGELOG.md | 4 ++++ production/helm/loki/Chart.yaml | 2 +- .../query-frontend/service-query-frontend-headless.yaml | 2 +- production/helm/loki/templates/ruler/service-ruler.yaml | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index a86a7d0281825..1bb316cf32021 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.6.2 + +- [BUGFIX] Fix query-frontend (headless) and ruler http-metrics targetPort + ## 6.6.1 - [BUGFIX] Fix query scheduler http-metrics targetPort diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index d2ec2d3d9e59b..47606001b954e 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. 
type: application appVersion: 3.0.0 -version: 6.6.1 +version: 6.6.2 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml b/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml index 258413aa1d570..b168ce6ce9520 100644 --- a/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml +++ b/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml @@ -22,7 +22,7 @@ spec: ports: - name: http-metrics port: 3100 - targetPort: http + targetPort: http-metrics protocol: TCP - name: grpc port: 9095 diff --git a/production/helm/loki/templates/ruler/service-ruler.yaml b/production/helm/loki/templates/ruler/service-ruler.yaml index 8200af2b69a95..1a1f0f4d2e91a 100644 --- a/production/helm/loki/templates/ruler/service-ruler.yaml +++ b/production/helm/loki/templates/ruler/service-ruler.yaml @@ -19,7 +19,7 @@ spec: ports: - name: http-metrics port: 3100 - targetPort: http + targetPort: http-metrics protocol: TCP - name: grpc port: 9095 From 6ec4712aaa12d68e320681694678af881a0f1e35 Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Wed, 29 May 2024 00:44:15 +0530 Subject: [PATCH 11/12] docs: update otlp ingestion docs to correct some info and add more details (#12969) --- docs/sources/send-data/otel/_index.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md index b7a67fcb14d06..6fa17c317054e 100644 --- a/docs/sources/send-data/otel/_index.md +++ b/docs/sources/send-data/otel/_index.md @@ -72,7 +72,7 @@ service: Since the OpenTelemetry protocol differs from the Loki storage model, here is how data in the OpenTelemetry format will be mapped by default to the Loki data model during ingestion, which can be changed as explained later: -- Index labels: Resource attributes map well to index labels in Loki, since both usually identify the source of the logs. Because Loki has a limit of 30 index labels, we have selected the following resource attributes to be stored as index labels, while the remaining attributes are stored as [Structured Metadata]({{< relref "../../get-started/labels/structured-metadata" >}}) with each log entry: +- Index labels: Resource attributes map well to index labels in Loki, since both usually identify the source of the logs. The default list of Resource Attributes to store as Index labels can be configured using `default_resource_attributes_as_index_labels` under [distributor's otlp_config](https://grafana.com/docs/loki//configure/#distributor). By default, the following resource attributes will be stored as index labels, while the remaining attributes are stored as [Structured Metadata]({{< relref "../../get-started/labels/structured-metadata" >}}) with each log entry: - cloud.availability_zone - cloud.region - container.name @@ -91,6 +91,10 @@ Since the OpenTelemetry protocol differs from the Loki storage model, here is ho - service.name - service.namespace + {{% admonition type="note" %}} + Because Loki has a default limit of 15 index labels, we recommend storing only select resource attributes as index labels. Although the default config selects more than 15 Resource Attributes, it should be fine since a few are mutually exclusive. + {{% /admonition %}} + - Timestamp: One of `LogRecord.TimeUnixNano` or `LogRecord.ObservedTimestamp`, based on which one is set. 
If both are not set, the ingestion timestamp will be used.
 
 - LogLine: `LogRecord.Body` holds the body of the log. However, since Loki only supports Log body in string format, we will stringify non-string values using the [AsString method from the OTEL collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353).

From 74a5bbc5caa3cea306aa7047b73fb81738d80872 Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Tue, 28 May 2024 15:54:56 -0400
Subject: [PATCH 12/12] docs: Update Grafana Agent to Grafana Alloy (#12602)

---
 .../get-started/labels/structured-metadata.md |  4 +--
 docs/sources/get-started/overview.md          |  2 +-
 docs/sources/operations/loki-canary/_index.md |  2 +-
 docs/sources/release-notes/v3.0.md            |  4 ++-
 docs/sources/send-data/_index.md              | 26 +++++++++++--------
 docs/sources/send-data/k6/log-generation.md   |  4 +--
 docs/sources/send-data/otel/_index.md         |  8 +++---
 docs/sources/send-data/promtail/_index.md     |  4 +++
 .../send-data/promtail/installation.md        |  4 +++
 docs/sources/setup/install/helm/concepts.md   |  2 +-
 .../migrate/migrate-from-distributed/index.md |  2 +-
 .../setup/migrate/migrate-to-alloy/_index.md  | 25 ++++++++++++++++++
 .../setup/migrate/migrate-to-tsdb/_index.md   |  2 +-
 13 files changed, 64 insertions(+), 25 deletions(-)
 create mode 100644 docs/sources/setup/migrate/migrate-to-alloy/_index.md

diff --git a/docs/sources/get-started/labels/structured-metadata.md b/docs/sources/get-started/labels/structured-metadata.md
index 99f46f7087925..91fe5d80ab676 100644
--- a/docs/sources/get-started/labels/structured-metadata.md
+++ b/docs/sources/get-started/labels/structured-metadata.md
@@ -21,7 +21,7 @@ Structured metadata can also be used to query commonly needed metadata from log
 
 You should only use structured metadata in the following situations:
 
-- If you are ingesting data in OpenTelemetry format, using the Grafana Agent or an OpenTelemetry Collector. Structured metadata was designed to support native ingestion of OpenTelemetry data.
+- If you are ingesting data in OpenTelemetry format, using Grafana Alloy or an OpenTelemetry Collector. Structured metadata was designed to support native ingestion of OpenTelemetry data.
 - If you have high cardinality metadata that should not be used as a label and does not exist in the log line. Some examples might include `process_id` or `thread_id` or Kubernetes pod names.
 
 It is an antipattern to extract information that already exists in your log lines and put it into structured metadata.
@@ -31,7 +31,7 @@ It is an antipattern to extract information that already exists in your log line
 
 You have the option to attach structured metadata to log lines in the push payload along with each log line and the timestamp.
 For more information on how to push logs to Loki via the HTTP endpoint, refer to the [HTTP API documentation](https://grafana.com/docs/loki//reference/api/#ingest-logs).
 
-Alternatively, you can use the Grafana Agent or Promtail to extract and attach structured metadata to your log lines.
+Alternatively, you can use Grafana Alloy or Promtail to extract and attach structured metadata to your log lines.
 See the [Promtail: Structured metadata stage](https://grafana.com/docs/loki//send-data/promtail/stages/structured_metadata/) for more information.
 
 With version 1.2.0 of the Loki Logstash output plugin, support for structured metadata has been added. For more information, see [logstash](https://grafana.com/docs/loki//send-data/logstash/).
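To make the extraction path above concrete, here is a minimal Promtail pipeline sketch that attaches a parsed field as structured metadata. The `json` and `structured_metadata` stage names come from the Promtail pipeline-stage docs linked in the hunk above; the job name, log path, and `thread_id` field are hypothetical.

```yaml
scrape_configs:
  - job_name: app                        # hypothetical job name
    static_configs:
      - targets: [localhost]
        labels:
          job: app
          __path__: /var/log/app/*.log   # hypothetical log path
    pipeline_stages:
      # Parse a JSON log line such as {"message":"...","thread_id":"T-42"}
      - json:
          expressions:
            thread_id: thread_id
      # Attach the extracted value as structured metadata rather than an
      # index label, since thread_id is high-cardinality.
      - structured_metadata:
          thread_id:
```

With this in place, the field can be queried with a label filter (for example `{job="app"} | thread_id="T-42"`) without adding `thread_id` to the index.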
diff --git a/docs/sources/get-started/overview.md b/docs/sources/get-started/overview.md
index 4051ba63cc11d..1194398c38f0c 100644
--- a/docs/sources/get-started/overview.md
+++ b/docs/sources/get-started/overview.md
@@ -22,7 +22,7 @@ Log data is then compressed and stored in chunks in an object store such as Amaz
 
 A typical Loki-based logging stack consists of 3 components:
 
-- **Agent** - An agent or client, for example Promtail, which is distributed with Loki, or the Grafana Agent. The agent scrapes logs, turns the logs into streams by adding labels, and pushes the streams to Loki through an HTTP API.
+- **Agent** - An agent or client, for example Grafana Alloy or Promtail (which is distributed with Loki). The agent scrapes logs, turns the logs into streams by adding labels, and pushes the streams to Loki through an HTTP API.
 
 - **Loki** - The main server, responsible for ingesting and storing logs and processing queries. It can be deployed in three different configurations, for more information see [deployment modes]({{< relref "../get-started/deployment-modes" >}}).
 
diff --git a/docs/sources/operations/loki-canary/_index.md b/docs/sources/operations/loki-canary/_index.md
index cf2a1075d3c06..f6c1bf23a9388 100644
--- a/docs/sources/operations/loki-canary/_index.md
+++ b/docs/sources/operations/loki-canary/_index.md
@@ -29,7 +29,7 @@ array. The contents look something like this:
 The relevant part of the log entry is the timestamp; the `p`s are just
 filler bytes to make the size of the log configurable.
 
-An agent (like Promtail) should be configured to read the log file and ship it
+An agent (like Grafana Alloy) should be configured to read the log file and ship it
 to Loki.
 
 Meanwhile, Loki Canary will open a WebSocket connection to Loki and will tail
diff --git a/docs/sources/release-notes/v3.0.md b/docs/sources/release-notes/v3.0.md
index a44483d57d2f4..ea3c7603ff820 100644
--- a/docs/sources/release-notes/v3.0.md
+++ b/docs/sources/release-notes/v3.0.md
@@ -20,7 +20,7 @@ Key features in Loki 3.0.0 include the following:
 
 - **Query acceleration with Bloom filters** (experimental): This is designed to speed up filter queries, with best results for queries that are looking for a specific text string like an error message or UUID. For more information, refer to [Query acceleration with Blooms](https://grafana.com/docs/loki//operations/query-acceleration-blooms/).
 
-- **Native OpenTelemetry Support**: A simplified ingestion pipeline (Loki Exporter no longer needed) and a more intuitive query experience for OTel logs. For more information, refer to the [OTEL documentation](https://grafana.com/docs/loki//send-data/otel/).
+- **Native OpenTelemetry Support**: A simplified ingestion pipeline (Loki Exporter no longer needed) and a more intuitive query experience for OTel logs. For more information, refer to the [OTel documentation](https://grafana.com/docs/loki//send-data/otel/).
 
 - **Helm charts**: A major upgrade to the Loki helm chart introduces support for `Distributed` mode (also known as [microservices](https://grafana.com/docs/loki//get-started/deployment-modes/#microservices-mode) mode), includes memcached by default, and includes several updates to configurations to improve Loki operations.
 
@@ -46,6 +46,8 @@ One of the focuses of Loki 3.0 was cleaning up unused code and old features that
 
 To learn more about breaking changes in this release, refer to the [Upgrade guide](https://grafana.com/docs/loki//setup/upgrade/).
+{{< docs/shared source="alloy" lookup="agent-deprecation.md" version="next" >}}
+
 ## Upgrade Considerations
 
 The path from 2.9 to 3.0 includes several breaking changes. For important upgrade guidance, refer to the [Upgrade Guide](https://grafana.com/docs/loki//setup/upgrade/) and the separate [Helm Upgrade Guide](https://grafana.com/docs/loki//setup/upgrade/upgrade-to-6x/).
diff --git a/docs/sources/send-data/_index.md b/docs/sources/send-data/_index.md
index 2064860dbbcdd..0ef9432d3caf3 100644
--- a/docs/sources/send-data/_index.md
+++ b/docs/sources/send-data/_index.md
@@ -18,16 +18,20 @@ While all clients can be used simultaneously to cover multiple use cases, which
 
 The following clients are developed and supported (for those customers who have purchased a support contract) by Grafana Labs for sending logs to Loki:
 
-- [Grafana Agent](/docs/agent/latest/) - The Grafana Agent is the recommended client for the Grafana stack. It can collect telemetry data for metrics, logs, traces, and continuous profiles and is fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems.
-- [Promtail]({{< relref "./promtail" >}}) - Promtail is the client of choice when you're running Kubernetes, as you can configure it to automatically scrape logs from pods running on the same node that Promtail runs on. Promtail and Prometheus running together in Kubernetes enables powerful debugging: if Prometheus and Promtail use the same labels, users can use tools like Grafana to switch between metrics and logs based on the label set.
-Promtail is also the client of choice on bare-metal since it can be configured to tail logs from all files given a host path. It is the easiest way to send logs to Loki from plain-text files (for example, things that log to `/var/log/*.log`).
-Lastly, Promtail works well if you want to extract metrics from logs such as counting the occurrences of a particular message.
-- [xk6-loki extension](https://github.com/grafana/xk6-loki) - The k6-loki extension lets you perform [load testing on Loki]({{< relref "./k6" >}}).
+- [Grafana Alloy](https://grafana.com/docs/alloy/latest/) - Grafana Alloy is a vendor-neutral distribution of the OpenTelemetry (OTel) Collector. Alloy offers native pipelines for OTel, Prometheus, Pyroscope, Loki, and many other metrics, logs, traces, and profile tools. In addition, you can use Alloy pipelines to perform different tasks, such as configuring alert rules in Loki and Mimir. Alloy is fully compatible with the OTel Collector, Prometheus Agent, and Promtail. You can use Alloy as an alternative to either of these solutions or combine it into a hybrid system of multiple collectors and agents. You can deploy Alloy anywhere within your IT infrastructure and pair it with your Grafana LGTM stack, a telemetry backend from Grafana Cloud, or any other compatible backend from any other vendor.
+  {{< docs/shared source="alloy" lookup="agent-deprecation.md" version="next" >}}
+- [Grafana Agent](/docs/agent/latest/) - The Grafana Agent is a client for the Grafana stack. It can collect telemetry data for metrics, logs, traces, and continuous profiles and is fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems.
+- [Promtail](https://grafana.com/docs/loki//send-data/promtail/) - Promtail can be configured to automatically scrape logs from Kubernetes pods running on the same node that Promtail runs on.
Promtail and Prometheus running together in Kubernetes enables powerful debugging: if Prometheus and Promtail use the same labels, users can use tools like Grafana to switch between metrics and logs based on the label set. Promtail can be configured to tail logs from all files given a host path. It is the easiest way to send logs to Loki from plain-text files (for example, things that log to `/var/log/*.log`).
+Promtail works well if you want to extract metrics from logs such as counting the occurrences of a particular message.
+{{< admonition type="note" >}}
+Promtail is feature complete. All future feature development will occur in Grafana Alloy.
+{{< /admonition >}}
+- [xk6-loki extension](https://github.com/grafana/xk6-loki) - The k6-loki extension lets you perform [load testing on Loki](https://grafana.com/docs/loki//send-data/k6/).
 
 ## OpenTelemetry Collector
 
 Loki natively supports ingesting OpenTelemetry logs over HTTP.
-See [Ingesting logs to Loki using OpenTelemetry Collector]({{< relref "./otel" >}}) for more details.
+For more information, see [Ingesting logs to Loki using OpenTelemetry Collector](https://grafana.com/docs/loki//send-data/otel/).
 
 ## Third-party clients
 
@@ -39,14 +43,14 @@ Grafana Labs cannot provide support for third-party clients. Once an issue has b
 
 The following are popular third-party Loki clients:
 
-- [Docker Driver]({{< relref "./docker-driver" >}}) - When using Docker and not Kubernetes, the Docker logging driver for Loki should
+- [Docker Driver](https://grafana.com/docs/loki//send-data/docker-driver/) - When using Docker and not Kubernetes, the Docker logging driver for Loki should
 be used as it automatically adds labels appropriate to the running container.
-- [Fluent Bit]({{< relref "./fluentbit" >}}) - The Fluent Bit plugin is ideal when you already have Fluentd deployed
+- [Fluent Bit](https://grafana.com/docs/loki//send-data/fluentbit/) - The Fluent Bit plugin is ideal when you already have Fluent Bit deployed
 and you already have configured `Parser` and `Filter` plugins.
-- [Fluentd]({{< relref "./fluentd" >}}) - The Fluentd plugin is ideal when you already have Fluentd deployed
+- [Fluentd](https://grafana.com/docs/loki//send-data/fluentd/) - The Fluentd plugin is ideal when you already have Fluentd deployed
 and you already have configured `Parser` and `Filter` plugins. Fluentd also works well for extracting metrics from logs when using its Prometheus plugin.
-- [Lambda Promtail]({{< relref "./lambda-promtail" >}}) - This is a workflow combining the Promtail push-api [scrape config]({{< relref "./promtail/configuration#loki_push_api" >}}) and the [lambda-promtail]({{< relref "./lambda-promtail" >}}) AWS Lambda function which pipes logs from Cloudwatch to Loki. This is a good choice if you're looking to try out Loki in a low-footprint way or if you wish to monitor AWS lambda logs in Loki
+- [Lambda Promtail](https://grafana.com/docs/loki//send-data/lambda-promtail/) - This is a workflow combining the Promtail push-api [scrape config](https://grafana.com/docs/loki//send-data/promtail/configuration/#loki_push_api) and the lambda-promtail AWS Lambda function which pipes logs from Cloudwatch to Loki.
This is a good choice if you're looking to try out Loki in a low-footprint way or if you wish to monitor AWS Lambda logs in Loki.
-- [Logstash]({{< relref "./logstash" >}}) - If you are already using logstash and/or beats, this will be the easiest way to start.
+- [Logstash](https://grafana.com/docs/loki//send-data/logstash/) - If you are already using Logstash and/or Beats, this will be the easiest way to start.
 By adding our output plugin you can quickly try Loki without doing big configuration changes.
 
 These third-party clients also enable sending logs to Loki:
diff --git a/docs/sources/send-data/k6/log-generation.md b/docs/sources/send-data/k6/log-generation.md
index 635f042f90b87..8ad79309191ba 100644
--- a/docs/sources/send-data/k6/log-generation.md
+++ b/docs/sources/send-data/k6/log-generation.md
@@ -61,8 +61,8 @@ export default () => {
 
 The second and third arguments of the method take the lower and upper bound of
 the batch size. The resulting batch size is a random value between the two
-arguments. This mimics the behaviour of a log client, such as Promtail or
-the Grafana Agent, where logs are buffered and pushed once a certain batch size
+arguments. This mimics the behavior of a log client, such as Grafana Alloy or Promtail,
+where logs are buffered and pushed once a certain batch size
 is reached or after a certain time when no logs have been received.
 
 The batch size is not equal to the payload size, as the batch size only counts
diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md
index 6fa17c317054e..4b28cbf16c7c2 100644
--- a/docs/sources/send-data/otel/_index.md
+++ b/docs/sources/send-data/otel/_index.md
@@ -1,6 +1,6 @@
 ---
 title: Ingesting logs to Loki using OpenTelemetry Collector
-menuTitle: OTEL Collector
+menuTitle: OTel Collector
 description: Configuring the OpenTelemetry Collector to send logs to Loki.
 aliases:
 - ../clients/k6/
@@ -97,7 +97,7 @@ Since the OpenTelemetry protocol differs from the Loki storage model, here is ho
 
 - Timestamp: One of `LogRecord.TimeUnixNano` or `LogRecord.ObservedTimestamp`, based on which one is set. If both are not set, the ingestion timestamp will be used.
 
-- LogLine: `LogRecord.Body` holds the body of the log. However, since Loki only supports Log body in string format, we will stringify non-string values using the [AsString method from the OTEL collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353).
+- LogLine: `LogRecord.Body` holds the body of the log. However, since Loki only supports Log body in string format, we will stringify non-string values using the [AsString method from the OTel collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353).
 
 - [Structured Metadata]({{< relref "../../get-started/labels/structured-metadata" >}}): Anything which can’t be stored in Index labels and LogLine would be stored as Structured Metadata. Here is a non-exhaustive list of what will be stored in Structured Metadata to give a sense of what it will hold:
   - Resource Attributes not stored as Index labels are replicated and stored with each log entry.
 
@@ -109,7 +109,7 @@ Things to note before ingesting OpenTelemetry logs to Loki:
 
 - Dots (.) are converted to underscores (_).
 
   Loki does not support `.` or any other special characters other than `_` in label names. The unsupported characters are replaced with an `_` while converting Attributes to Index Labels or Structured Metadata.
-  Also, please note that while writing the queries, you must use the normalized format, i.e. use `_` instead of special characters while querying data using OTEL Attributes.
+  Also note that when writing queries, you must use the normalized format, that is, use `_` instead of special characters when querying data using OTel Attributes.
 
   For example, `service.name` in OTLP would become `service_name` in Loki.
 
@@ -120,7 +120,7 @@ Things to note before ingesting OpenTelemetry logs to Loki:
 
 - Stringification of non-string Attribute values
 
-  While converting Attribute values in OTLP to Index label values or Structured Metadata, any non-string values are converted to string using [AsString method from the OTEL collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353).
+  While converting Attribute values in OTLP to Index label values or Structured Metadata, any non-string values are converted to string using [AsString method from the OTel collector lib](https://github.com/open-telemetry/opentelemetry-collector/blob/ab3d6c5b64701e690aaa340b0a63f443ff22c1f0/pdata/pcommon/value.go#L353).
 
 ### Changing the default mapping of OTLP to Loki Format
 
diff --git a/docs/sources/send-data/promtail/_index.md b/docs/sources/send-data/promtail/_index.md
index 7e560e661438a..03bbf6487c396 100644
--- a/docs/sources/send-data/promtail/_index.md
+++ b/docs/sources/send-data/promtail/_index.md
@@ -12,6 +12,10 @@ Promtail is an agent which ships the contents of local logs to a private Grafana
 instance or [Grafana Cloud](/oss/loki). It is usually deployed to every machine
 that runs applications which need to be monitored.
 
+{{< admonition type="note" >}}
+Promtail is feature complete. All future feature development will occur in Grafana Alloy.
+{{< /admonition >}}
+
 It primarily:
 
 - Discovers targets
diff --git a/docs/sources/send-data/promtail/installation.md b/docs/sources/send-data/promtail/installation.md
index 4d2359e94c171..25a818458a80d 100644
--- a/docs/sources/send-data/promtail/installation.md
+++ b/docs/sources/send-data/promtail/installation.md
@@ -9,6 +9,10 @@ weight: 100
 
 # Install Promtail
 
+{{< admonition type="note" >}}
+Promtail is feature complete. All future feature development will occur in Grafana Alloy.
+{{< /admonition >}}
+
 Promtail is distributed as a binary, in a Docker container, or there is a Helm
 chart to install it in a Kubernetes cluster.
 
diff --git a/docs/sources/setup/install/helm/concepts.md b/docs/sources/setup/install/helm/concepts.md
index fd8f81ebe4745..581498af89b23 100644
--- a/docs/sources/setup/install/helm/concepts.md
+++ b/docs/sources/setup/install/helm/concepts.md
@@ -21,7 +21,7 @@ By default Loki will be installed in the scalable mode. This consists of a read
 
 ## Dashboards
 
-This chart includes dashboards for monitoring Loki. These require the scrape configs defined in the `monitoring.serviceMonitor` and `monitoring.selfMonitoring` sections described below. The dashboards are deployed via a config map which can be mounted on a Grafana instance. The Dashboard require an installation of the Grafana Agent and the Prometheus operator. The agent is installed with this chart.
+This chart includes dashboards for monitoring Loki. These require the scrape configs defined in the `monitoring.serviceMonitor` and `monitoring.selfMonitoring` sections described below. The dashboards are deployed via a config map which can be mounted on a Grafana instance.
The dashboards require an installation of the Grafana Agent and the Prometheus Operator. The agent is installed with this chart.
diff --git a/docs/sources/setup/migrate/migrate-from-distributed/index.md b/docs/sources/setup/migrate/migrate-from-distributed/index.md
index 1618716fd26e8..01b016b8a9376 100644
--- a/docs/sources/setup/migrate/migrate-from-distributed/index.md
+++ b/docs/sources/setup/migrate/migrate-from-distributed/index.md
@@ -48,7 +48,7 @@ This leverages the fact that the new deployment adds a `app.kubernetes.io/compon
 
 Once the new cluster is up, add the appropriate data source in Grafana for the new cluster. Check that the following queries return results:
 
 - Confirm new and old logs are in the new deployment. Using the new deployment's Loki data source in Grafana, look for:
-  - Logs with a job that is unqiue to your existing Promtail or Grafana Agent, the one we adjusted above to exclude logs from the new deployment which is not yet pushing logs to the new deployment. If you can query those via the new deployment in shows we have not lost historical logs.
+  - Logs with a job that is unique to your existing Promtail or Grafana Agent, the one we adjusted above to exclude logs from the new deployment which is not yet pushing logs to the new deployment. If you can query those via the new deployment, it shows we have not lost historical logs.
   - Logs with the label `job="loki/loki-read"`. The read component does not exist in `loki-distributed`, so this shows the new Loki cluster's self-monitoring is working correctly.
 - Confirm new logs are in the old deployment. Using the old deployment's Loki data source in Grafana, look for:
   - Logs with the label `job="loki/loki-read"`. Since you have excluded logs from the new deployment from going to the `loki-distributed` deployment, if you can query them through the `loki-distributed` Loki data source, that shows the ingesters have joined the same ring, and are queryable from the `loki-distributed` queriers.
diff --git a/docs/sources/setup/migrate/migrate-to-alloy/_index.md b/docs/sources/setup/migrate/migrate-to-alloy/_index.md
new file mode 100644
index 0000000000000..adb4dac9b3a1e
--- /dev/null
+++ b/docs/sources/setup/migrate/migrate-to-alloy/_index.md
@@ -0,0 +1,25 @@
+---
+title: Migrate to Alloy
+description: Provides links to documentation to migrate to Grafana Alloy.
+weight: 100
+---
+
+# Migrate to Alloy
+
+Grafana Alloy is the new name for the Grafana Labs distribution of the OpenTelemetry collector. Grafana Agent Static, Grafana Agent Flow, and Grafana Agent Operator have been deprecated and are in Long-Term Support (LTS) through October 31, 2025. They will reach End-of-Life (EOL) on November 1, 2025. Grafana Labs has provided tools and migration documentation to assist you in migrating to Grafana Alloy.
+
+Read more about why we recommend migrating to [Grafana Alloy](https://grafana.com/blog/2024/04/09/grafana-alloy-opentelemetry-collector-with-prometheus-pipelines/).
+
+This section provides links to documentation for how to migrate to Alloy.
+ +- [Migrate from Grafana Agent Static](https://grafana.com/docs/alloy/latest/tasks/migrate/from-static/) + +- [Migrate from Grafana Agent Flow](https://grafana.com/docs/alloy/latest/tasks/migrate/from-flow/) + +- [Migrate from Grafana Agent Operator](https://grafana.com/docs/alloy/latest/tasks/migrate/from-operator/) + +- [Migrate from OpenTelemetry Collector](https://grafana.com/docs/alloy/latest/tasks/migrate/from-otelcol/) + +- [Migrate from Prometheus](https://grafana.com/docs/alloy/latest/tasks/migrate/from-prometheus/) + +- [Migrate from Promtail](https://grafana.com/docs/alloy/latest/tasks/migrate/from-promtail/) diff --git a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md index 963913e21ef9e..49ba506dc5536 100644 --- a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md +++ b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md @@ -2,7 +2,7 @@ title: Migrate to TSDB menuTitle: Migrate to TSDB description: Migration guide for moving from any of the older indexes to TSDB -weight: 100 +weight: 300 keywords: - migrate - tsdb
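Tying back to the OTLP ingestion change in this series (#12969): below is a sketch of trimming the set of resource attributes promoted to index labels, assuming the `distributor.otlp_config` block that the updated docs reference. The attributes chosen here are illustrative, not a recommendation for every workload.

```yaml
# Loki configuration fragment (sketch): promote only a few low-cardinality
# resource attributes to index labels; all remaining OTLP attributes are
# stored as structured metadata with each log entry.
distributor:
  otlp_config:
    default_resource_attributes_as_index_labels:
      - service.name
      - service.namespace
      - deployment.environment
```

Keeping this list short is the practical answer to the 15-index-label limit called out in the updated docs: promote only attributes you routinely filter on, and leave high-cardinality attributes in structured metadata.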