diff --git a/CHANGELOG.md b/CHANGELOG.md index ae8d9bbb162f6..8975f31b20689 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ##### Enhancements +* [11319](https://github.com/grafana/loki/pull/11319) **someStrangerFromTheAbyss**: Helm: Add extraContainers to the write pods. * [11243](https://github.com/grafana/loki/pull/11243) **kavirajk**: Inflight-logging: Add extra metadata to inflight requests logging. * [11110](https://github.com/grafana/loki/pull/11003) **MichelHollands**: Change the default of the `metrics-namespace` flag to 'loki'. * [11086](https://github.com/grafana/loki/pull/11086) **kandrew5**: Helm: Allow topologySpreadConstraints @@ -37,6 +38,8 @@ * [10727](https://github.com/grafana/loki/pull/10727) **sandeepsukhani** Native otlp ingestion support * [11051](https://github.com/grafana/loki/pull/11051) Refactor to not use global logger in modules * [10956](https://github.com/grafana/loki/pull/10956) **jeschkies** do not wrap requests but send pure Protobuf from frontend v2 via scheduler to querier when `-frontend.encoding=protobuf`. +* [10417](https://github.com/grafana/loki/pull/10417) **jeschkies** shard `quantile_over_time` range queries using probabilistic data structures. +* [11284](https://github.com/grafana/loki/pull/11284) **ashwanthgoli** Config: Adds `frontend.max-query-capacity` to tune per-tenant query capacity. ##### Fixes * [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var. @@ -83,6 +86,8 @@ #### Jsonnet +* [11312](https://github.com/grafana/loki/pull/11312) **sentoz**: Loki ksonnet: Do not generate configMap for consul if you are using memberlist + * [11020](https://github.com/grafana/loki/pull/11020) **ashwanthgoli**: Loki ksonnet: Do not generate table-manager manifests if shipper store is in-use. * [10784](https://github.com/grafana/loki/pull/10894) **slim-bean** Update index gateway client to use a headless service. diff --git a/Makefile b/Makefile index ee022ba2129f0..0b73cfee6d410 100644 --- a/Makefile +++ b/Makefile @@ -427,7 +427,7 @@ PLUGIN_ARCH ?= define build-rootfs rm -rf clients/cmd/docker-driver/rootfs || true mkdir clients/cmd/docker-driver/rootfs - docker build -t rootfsimage -f clients/cmd/docker-driver/Dockerfile . + docker build --build-arg BUILD_IMAGE=$(BUILD_IMAGE) -t rootfsimage -f clients/cmd/docker-driver/Dockerfile . ID=$$(docker create rootfsimage true) && \ (docker export $$ID | tar -x -C clients/cmd/docker-driver/rootfs) && \ diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile index 5f81dedbb5bf4..5fe3fae2c97dc 100644 --- a/clients/cmd/docker-driver/Dockerfile +++ b/clients/cmd/docker-driver/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.29.3 +ARG BUILD_IMAGE=grafana/loki-build-image:0.31.2 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile .
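PR [11284](https://github.com/grafana/loki/pull/11284) above introduces `frontend.max-query-capacity`. The limits documentation added later in this diff defines the effective per-tenant querier count as `min(frontend.max-queriers-per-tenant, ceil(querier_replicas * frontend.max-query-capacity))`, with `0` meaning a limit is not set. A minimal sketch of that arithmetic, using illustrative names rather than Loki's internal identifiers:

```go
package main

import (
	"fmt"
	"math"
)

// effectiveQueriers mirrors the documented formula:
// min(frontend.max-queriers-per-tenant, ceil(querier_replicas * frontend.max-query-capacity)).
// A zero value for either setting means that limit is not applied.
func effectiveQueriers(replicas, maxQueriersPerTenant int, maxQueryCapacity float64) int {
	limit := replicas // with neither limit applied, all queriers serve the tenant
	if maxQueriersPerTenant > 0 && maxQueriersPerTenant < limit {
		limit = maxQueriersPerTenant
	}
	if maxQueryCapacity > 0 {
		if byCapacity := int(math.Ceil(float64(replicas) * maxQueryCapacity)); byCapacity < limit {
			limit = byCapacity
		}
	}
	return limit
}

func main() {
	// 10 querier replicas, max-queriers-per-tenant=6, max-query-capacity=0.5:
	// min(6, ceil(10*0.5)) = 5 queriers for this tenant.
	fmt.Println(effectiveQueriers(10, 6, 0.5))
}
```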
diff --git a/clients/cmd/fluent-bit/loki_test.go b/clients/cmd/fluent-bit/loki_test.go index a6033360cd30a..1bfd21d22ce02 100644 --- a/clients/cmd/fluent-bit/loki_test.go +++ b/clients/cmd/fluent-bit/loki_test.go @@ -1,3 +1,5 @@ +//go:build cgo + package main import ( diff --git a/clients/cmd/fluent-bit/out_grafana_loki.go b/clients/cmd/fluent-bit/out_grafana_loki.go index 38f24c3841a67..d396fddfc8da2 100644 --- a/clients/cmd/fluent-bit/out_grafana_loki.go +++ b/clients/cmd/fluent-bit/out_grafana_loki.go @@ -63,7 +63,7 @@ func FLBPluginInit(ctx unsafe.Pointer) int { level.Info(logger).Log("[flb-go]", "Starting fluent-bit-go-loki", "version", version.Info()) paramLogger := log.With(logger, "[flb-go]", "provided parameter") - level.Info(paramLogger).Log("URL", conf.clientConfig.URL) + level.Info(paramLogger).Log("URL", conf.clientConfig.URL.Redacted()) level.Info(paramLogger).Log("TenantID", conf.clientConfig.TenantID) level.Info(paramLogger).Log("BatchWait", fmt.Sprintf("%.3fs", conf.clientConfig.BatchWait.Seconds())) level.Info(paramLogger).Log("BatchSize", conf.clientConfig.BatchSize) diff --git a/docs/sources/alert/_index.md b/docs/sources/alert/_index.md index a6d29cec5ac19..81a2671c2101f 100644 --- a/docs/sources/alert/_index.md +++ b/docs/sources/alert/_index.md @@ -202,16 +202,22 @@ Another great use case is alerting on high cardinality sources. These are things Creating these alerts in LogQL is attractive because these metrics can be extracted at _query time_, meaning we don't suffer the cardinality explosion in our metrics store. -> **Note** As an example, we can use LogQL v2 to help Loki to monitor _itself_, alerting us when specific tenants have queries that take longer than 10s to complete! To do so, we'd use the following query: `sum by (org_id) (rate({job="loki-prod/query-frontend"} |= "metrics.go" | logfmt | duration > 10s [1m]))` +{{% admonition type="note" %}} +As an example, we can use LogQL v2 to help Loki to monitor _itself_, alerting us when specific tenants have queries that take longer than 10s to complete! To do so, we'd use the following query: `sum by (org_id) (rate({job="loki-prod/query-frontend"} |= "metrics.go" | logfmt | duration > 10s [1m]))` +{{% /admonition %}} ## Interacting with the Ruler ### Cortextool Because the rule files are identical to Prometheus rule files, we can interact with the Loki Ruler via [`cortextool`](https://github.com/grafana/cortex-tools#rules). The CLI is in early development, but it works with both Loki and Cortex. Pass the `--backend=loki` option when using it with Loki. -> **Note:** Not all commands in cortextool currently support Loki. +{{% admonition type="note" %}} +Not all commands in cortextool currently support Loki. +{{% /admonition %}} -> **Note:** cortextool was intended to run against multi-tenant Loki, commands need an `--id=` flag set to the Loki instance ID or set the environment variable `CORTEX_TENANT_ID`. If Loki is running in single tenant mode, the required ID is `fake` (yes we know this might seem alarming but it's totally fine, no it can't be changed) +{{% admonition type="note" %}} +cortextool was intended to run against multi-tenant Loki, so commands need an `--id=` flag set to the Loki instance ID, or the environment variable `CORTEX_TENANT_ID` set. If Loki is running in single tenant mode, the required ID is `fake`.
+{{% /admonition %}} An example workflow is included below: diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index 9844c6109dd36..40e98748356eb 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -842,6 +842,11 @@ results_cache: # CLI flag: -querier.parallelise-shardable-queries [parallelise_shardable_queries: <boolean> | default = true] +# A comma-separated list of LogQL vector and range aggregations that should be +# sharded. +# CLI flag: -querier.shard-aggregations +[shard_aggregations: <string> | default = ""] + # Cache index stats query results. # CLI flag: -querier.cache-index-stats-results [cache_index_stats_results: <boolean> | default = false] @@ -1838,6 +1843,21 @@ client: # CLI flag: -bloom-gateway-client.log-gateway-requests [log_gateway_requests: <boolean> | default = false] + results_cache: + # The cache block configures the cache backend. + # The CLI flags prefix for this block configuration is: + # bloom-gateway-client.cache + [cache: <cache_config>] + + # Use compression in cache. The default is an empty value '', which disables + # compression. Supported values are: 'snappy' and ''. + # CLI flag: -bloom-gateway-client.cache.compression + [compression: <string> | default = ""] + + # Flag to control whether to cache bloom gateway client requests/responses. + # CLI flag: -bloom-gateway-client.cache_results + [cache_results: <boolean> | default = false] + # Number of workers to use for filtering chunks concurrently. # CLI flag: -bloom-gateway.worker-concurrency [worker_concurrency: <int> | default = 4] @@ -2768,6 +2788,22 @@ The `limits_config` block configures global and per-tenant limits in Loki. # CLI flag: -frontend.max-queriers-per-tenant [max_queriers_per_tenant: <int> | default = 0] +# How much of the available query capacity ("querier" components in distributed +# mode, "read" components in SSD mode) can be used by a single tenant. Allowed +# values are 0.0 to 1.0. For example, setting this to 0.5 would allow a tenant +# to use half of the available queriers for processing the query workload. If +# set to 0, query capacity is determined by frontend.max-queriers-per-tenant. +# When both frontend.max-queriers-per-tenant and frontend.max-query-capacity are +# configured, the smaller of the two resulting querier counts is used: +# min(frontend.max-queriers-per-tenant, ceil(querier_replicas * +# frontend.max-query-capacity)). *All* queriers will handle requests for the +# tenant if neither limit is applied. This option only works with queriers +# connecting to the query-frontend / query-scheduler, not when using downstream +# URL. Use this feature in a multi-tenant setup where you need to limit query +# capacity for certain tenants. +# CLI flag: -frontend.max-query-capacity +[max_query_capacity: <float> | default = 0] + # Number of days of index to be kept always downloaded for queries. Applies only # to per user index in boltdb-shipper index store. 0 to disable. # CLI flag: -store.query-ready-index-num-days @@ -3012,6 +3048,10 @@ shard_streams: # CLI flag: -bloom-gateway.blocks-downloading-parallelism [bloom_gateway_blocks_downloading_parallelism: <int> | default = 50] +# Interval for computing the cache key in the Bloom Gateway. +# CLI flag: -bloom-gateway.cache-key-interval +[bloom_gateway_cache_key_interval: <duration> | default = 15m] + # Allow user to send structured metadata in push payload. # CLI flag: -validation.allow-structured-metadata [allow_structured_metadata: <boolean> | default = false] @@ -4217,6 +4257,7 @@ The TLS configuration. The cache block configures the cache backend.
The supported CLI flags `<prefix>` used to reference this configuration block are: +- `bloom-gateway-client.cache` - `frontend` - `frontend.index-stats-results-cache` - `frontend.volume-results-cache` diff --git a/docs/sources/get-started/labels/structured-metadata.md b/docs/sources/get-started/labels/structured-metadata.md index ee6c58c1cecdc..3c7ad0793829f 100644 --- a/docs/sources/get-started/labels/structured-metadata.md +++ b/docs/sources/get-started/labels/structured-metadata.md @@ -6,7 +6,7 @@ description: Attaching metadata to logs. # What is structured metadata {{% admonition type="warning" %}} -Structured metadata is an experimental feature and is subject to change in future releases of Grafana Loki. This feature is not yet available for Cloud Logs users. +Structured metadata is an experimental feature and is subject to change in future releases of Grafana Loki. {{% /admonition %}} {{% admonition type="warning" %}} diff --git a/docs/sources/operations/automatic-stream-sharding.md b/docs/sources/operations/automatic-stream-sharding.md index 0c0c96d0d9010..a973135fc75c4 100644 --- a/docs/sources/operations/automatic-stream-sharding.md +++ b/docs/sources/operations/automatic-stream-sharding.md @@ -18,14 +18,17 @@ per-stream rate limit. shard_streams: enabled: true ``` -2. Optionally lower the `desired_rate` in bytes if you find that the system is still hitting the `per_stream_rate_limit`: +1. Optionally lower the `desired_rate` in bytes if you find that the system is still hitting the `per_stream_rate_limit`: ```yaml limits_config: shard_streams: enabled: true desired_rate: 2097152 #2MiB ``` -3. Optionally enable `logging_enabled` for debugging stream sharding. **Note**: this may affect the ingestion performance of Loki. +1. Optionally enable `logging_enabled` for debugging stream sharding. + {{% admonition type="note" %}} + This may affect the ingestion performance of Loki. + {{% /admonition %}} ```yaml limits_config: shard_streams: diff --git a/docs/sources/operations/loki-canary/_index.md b/docs/sources/operations/loki-canary/_index.md index 4a5f26b423c8e..a5f04c09a1e0b 100644 --- a/docs/sources/operations/loki-canary/_index.md +++ b/docs/sources/operations/loki-canary/_index.md @@ -17,7 +17,7 @@ artificial log lines, such that Loki Canary forms information about the performance of the Loki cluster. The information is available as Prometheus time series metrics. -{{< figure max-width="75%" src="./loki-canary-block.png">}} +{{< figure max-width="75%" src="./loki-canary-block.png" alt="Loki canary">}} Loki Canary writes a log to a file and stores the timestamp in an internal array. The contents look something like this: diff --git a/docs/sources/operations/scalability.md b/docs/sources/operations/scalability.md index ff8f1d06a0385..e916e2bbdbe70 100644 --- a/docs/sources/operations/scalability.md +++ b/docs/sources/operations/scalability.md @@ -66,7 +66,9 @@ this will result in far lower `ruler` resource usage because the majority of the The LogQL queries coming from the `ruler` will be executed against the given `query-frontend` service. Requests will be load-balanced across all `query-frontend` IPs if the `dns:///` prefix is used. -> **Note:** Queries that fail to execute are _not_ retried. +{{% admonition type="note" %}} +Queries that fail to execute are _not_ retried.
+{{% /admonition %}} ### Limits and Observability diff --git a/docs/sources/operations/storage/logs-deletion.md b/docs/sources/operations/storage/logs-deletion.md index 5de829750d4aa..f5885ed66177e 100644 --- a/docs/sources/operations/storage/logs-deletion.md +++ b/docs/sources/operations/storage/logs-deletion.md @@ -22,7 +22,9 @@ Log entry deletion relies on configuration of the custom logs retention workflow Enable log entry deletion by setting `retention_enabled` to true in the compactor's configuration and setting `deletion_mode` to `filter-only` or `filter-and-delete` in the runtime config. `delete_request_store` also needs to be configured when retention is enabled to process delete requests; this determines the storage bucket that stores the delete requests. -> **Warning:** Be very careful when enabling retention. It is strongly recommended that you also enable versioning on your objects in object storage to allow you to recover from accidental misconfiguration of a retention setting. If you want to enable deletion but not not want to enforce retention, configure the `retention_period` setting with a value of `0s`. +{{% admonition type="warning" %}} +Be very careful when enabling retention. It is strongly recommended that you also enable versioning on your objects in object storage to allow you to recover from accidental misconfiguration of a retention setting. If you want to enable deletion but do not want to enforce retention, configure the `retention_period` setting with a value of `0s`. +{{% /admonition %}} Because it is a runtime configuration, `deletion_mode` can be set per-tenant, if desired. diff --git a/docs/sources/query/template_functions.md b/docs/sources/query/template_functions.md index 43cd78e0030e9..784a396d19539 100644 --- a/docs/sources/query/template_functions.md +++ b/docs/sources/query/template_functions.md @@ -710,7 +710,7 @@ Examples: ```template {{ default "-" "" }} // output: - -{{ default "" "foo" }} // output: foo +{{ default "-" "foo" }} // output: foo ``` Example of a query to print a `-` if the `http_request_headers_x_forwarded_for` label is empty: diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md index a2439ccfaee77..fabac533b32bd 100644 --- a/docs/sources/reference/api.md +++ b/docs/sources/reference/api.md @@ -12,8 +12,10 @@ weight: 100 Loki exposes an HTTP API for pushing, querying, and tailing log data, as well as for viewing and managing cluster information. -**Note that authorization is not part of the Loki API.** +{{% admonition type="note" %}} +Authorization is not part of the Loki API. Authorization needs to be done separately, for example, using an open-source load-balancer such as NGINX. +{{% /admonition %}} ## Endpoints @@ -1291,7 +1293,10 @@ DELETE /loki/api/v1/delete Query parameters: - `request_id=<request_id>`: Identifies the delete request to cancel; IDs are found using the `delete` endpoint. -- `force=<boolean>`: When the `force` query parameter is true, partially completed delete requests will be canceled. NOTE: some data from the request may still be deleted and the deleted request will be listed as 'processed' +- `force=<boolean>`: When the `force` query parameter is true, partially completed delete requests will be canceled. + {{% admonition type="note" %}} + Some data from the request may still be deleted and the deleted request will be listed as 'processed'. + {{% /admonition %}} A 204 response indicates success.
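The `template_functions.md` change above fixes the second `default` example so it shows a non-empty value passing through unchanged. A minimal sketch of those semantics in Go's `text/template`, with a hand-rolled `default` helper standing in for Loki's built-in one (the helper is an assumption for illustration, not Loki's implementation):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// A sprig-style "default" helper: return def when val is empty,
	// otherwise val - mirroring the corrected docs example above.
	funcs := template.FuncMap{
		"default": func(def, val string) string {
			if val == "" {
				return def
			}
			return val
		},
	}
	tmpl := template.Must(template.New("t").Funcs(funcs).Parse(
		`{{ default "-" "" }} {{ default "-" "foo" }}`))
	_ = tmpl.Execute(os.Stdout, nil) // prints: - foo
}
```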
diff --git a/docs/sources/release-notes/cadence.md b/docs/sources/release-notes/cadence.md index 2cc6498aaadae..f13781cf1c5f3 100644 --- a/docs/sources/release-notes/cadence.md +++ b/docs/sources/release-notes/cadence.md @@ -15,10 +15,12 @@ naming scheme: `MAJOR`.`MINOR`.`PATCH`. - `MINOR` (roughly once a quarter): these releases include new features which generally do not break backwards-compatibility, but from time to time we might introduce _minor_ breaking changes, and we will specify these in our upgrade docs. - `PATCH` (roughly once or twice a month): these releases include bug and security fixes which do not break backwards-compatibility. -> **NOTE:** While our naming scheme resembles [Semantic Versioning](https://semver.org/), at this time we do not strictly follow its +{{% admonition type="note" %}} +While our naming scheme resembles [Semantic Versioning](https://semver.org/), at this time we do not strictly follow its guidelines to the letter. Our goal is to provide regular releases that are as stable as possible, and we take backwards-compatibility seriously. As with any software, always read the [release notes](/release-notes) and the [upgrade guide](/upgrading) whenever choosing a new version of Loki to install. +{{% /admonition %}} New releases are based off a [weekly release](#weekly-releases) which we have vetted for stability over a number of weeks. diff --git a/docs/sources/release-notes/v2-3.md b/docs/sources/release-notes/v2-3.md index 6167bf343bee4..382157d73742a 100644 --- a/docs/sources/release-notes/v2-3.md +++ b/docs/sources/release-notes/v2-3.md @@ -78,8 +78,8 @@ List of security fixes for 2.3.x. * [4020](https://github.com/grafana/loki/pull/4020) **simonswine**: Restrict path segments in TenantIDs (CVE-2021-36156 CVE-2021-36157). -**Note** Exploitation of this vulnerability requires the ability for an attacker to craft and send directly to Loki an `X-Scope-OrgID` header, end users should not have the ability to create and send this header directly to Loki as it controls access to tenants and is important to control setting of this header for proper tenant isolation and security. We always recommend having a proxy or gateway be responsible for setting the `X-Scope-OrgID`. - +{{% admonition type="note" %}} +Exploitation of this vulnerability requires the ability for an attacker to craft and send directly to Loki an `X-Scope-OrgID` header; end users should not have the ability to create and send this header directly to Loki, as it controls access to tenants, and controlling how this header is set is important for proper tenant isolation and security. We always recommend having a proxy or gateway be responsible for setting the `X-Scope-OrgID`. +{{% /admonition %}} ## Bug fixes diff --git a/docs/sources/send-data/docker-driver/configuration.md b/docs/sources/send-data/docker-driver/configuration.md index 15ef123232426..38d3962f8a0a2 100644 --- a/docs/sources/send-data/docker-driver/configuration.md +++ b/docs/sources/send-data/docker-driver/configuration.md @@ -33,10 +33,11 @@ docker run --log-driver=loki \ --log-opt loki-batch-size=400 \ grafana/grafana ``` - -> **Note**: The Loki logging driver still uses the json-log driver in combination with sending logs to Loki, this is mainly useful to keep the `docker logs` command working. -> You can adjust file size and rotation using the respective log option `max-size` and `max-file`. Keep in mind that default values for these options are not taken from json-log configuration.
-> You can deactivate this behavior by setting the log option `no-file` to true. +{{% admonition type="note" %}} +The Loki logging driver still uses the json-log driver in combination with sending logs to Loki; this is mainly useful to keep the `docker logs` command working. +You can adjust file size and rotation using the respective log options `max-size` and `max-file`. Keep in mind that default values for these options are not taken from json-log configuration. +You can deactivate this behavior by setting the log option `no-file` to true. +{{% /admonition %}} ## Change the default logging driver @@ -64,10 +65,11 @@ Options for the logging driver can also be configured with `log-opts` in the } } ``` - -> **Note**: log-opt configuration options in daemon.json must be provided as -> strings. Boolean and numeric values (such as the value for loki-batch-size in -> the example above) must therefore be enclosed in quotes (`"`). +{{% admonition type="note" %}} +log-opt configuration options in daemon.json must be provided as strings. Boolean and numeric values (such as the value for loki-batch-size in the example above) must therefore be enclosed in quotes (`"`). +{{% /admonition %}} After changing `daemon.json`, restart the Docker daemon for the changes to take effect. All **newly created** containers from that host will then send logs to Loki via the driver. @@ -102,9 +104,9 @@ docker-compose -f docker-compose.yaml up Once deployed, the Grafana service will send its logs to Loki. -> **Note**: stack name and service name for each swarm service and project name -> and service name for each compose service are automatically discovered and -> sent as Loki labels, this way you can filter by them in Grafana. +{{% admonition type="note" %}} +Stack name and service name for each swarm service and project name and service name for each compose service are automatically discovered and sent as Loki labels, so you can filter by them in Grafana. +{{% /admonition %}} ## Labels @@ -148,7 +150,9 @@ services: - "3000:3000" ``` -> Note the `loki-pipeline-stages: |` allowing to keep the indentation correct. +{{% admonition type="note" %}} +Note that `loki-pipeline-stages: |` keeps the indentation correct. +{{% /admonition %}} When using docker run you can also pass the value via a string parameter like such: diff --git a/docs/sources/send-data/fluentd/_index.md b/docs/sources/send-data/fluentd/_index.md index bdf242e81b57e..e28ec048de068 100644 --- a/docs/sources/send-data/fluentd/_index.md +++ b/docs/sources/send-data/fluentd/_index.md @@ -69,7 +69,9 @@ services: ## Usage -**Note**: use either `<label>` or `extra_labels` to set at least one label. +{{% admonition type="note" %}} +Use either `<label>` or `extra_labels` to set at least one label. +{{% /admonition %}} In your Fluentd configuration, add `@type loki`. Additional configuration is optional. Default values would look like this: diff --git a/docs/sources/send-data/lambda-promtail/_index.md b/docs/sources/send-data/lambda-promtail/_index.md index 170665713ac26..7306d76f02a46 100644 --- a/docs/sources/send-data/lambda-promtail/_index.md +++ b/docs/sources/send-data/lambda-promtail/_index.md @@ -134,7 +134,7 @@ To manage this issue, AWS introduced [S3 event notifications with Event Bridge]( The diagram below shows how notification logs will be written from the source service into an S3 bucket. From there on, the S3 bucket will send an `Object created` notification into the EventBridge `default` bus, where we can configure a rule to trigger Lambda Promtail.
-![](https://grafana.com/media/docs/loki/lambda-promtail-with-eventbridge.png) +{{< figure src="https://grafana.com/media/docs/loki/lambda-promtail-with-eventbridge.png" alt="The diagram shows how notification logs are written from the source service into an S3 bucket">}} The [template-eventbridge.yaml](https://github.com/grafana/loki/blob/main/tools/lambda-promtail/template-eventbridge.yaml) CloudFormation template configures Lambda-promtail with EventBridge to address this known issue. To deploy the template, use the snippet below, completing appropriately the `ParameterValue` arguments. diff --git a/docs/sources/send-data/promtail/cloud/ecs/_index.md b/docs/sources/send-data/promtail/cloud/ecs/_index.md index 87b4eb4cf06a9..90682f265ded5 100644 --- a/docs/sources/send-data/promtail/cloud/ecs/_index.md +++ b/docs/sources/send-data/promtail/cloud/ecs/_index.md @@ -130,13 +130,18 @@ The `log_router` container image is the [Fluent bit Loki docker image][fluentbit "logConfiguration": { "logDriver": "awsfirelens", "options": { - "Name": "grafana-loki", - "Url": "https://:@/loki/api/v1/push", + "Name": "loki", + "Host": "", + "Http_User": "", "Labels": "{job=\"firelens\"}", "RemoveKeys": "container_id,ecs_task_arn", "LabelKeys": "container_name,ecs_task_definition,source,ecs_cluster", "LineFormat": "key_value" - } + }, + "secretOptions": [{ + "name": "Http_Passwd", + "valueFrom": "data.aws_secretsmanager_secret.grafana_cloud_loki_http_password.id" + }] }, "name": "sample-app" } The second container is our `sample-app`, a simple [alpine][alpine] container that prints to stdout welcoming messages. To send those logs to Loki, we will configure this container to use the log driver `awsfirelens`. -Go ahead and replace the `Url` property with your [GrafanaCloud][GrafanaCloud] credentials, you can find them in your [account][grafanacloud account] in the Loki instance page. If you're running your own Loki instance replace completely the URL (e.g `http://my-loki.com:3100/loki/api/v1/push`). +Go ahead and replace the `Host` and `Http_User` properties with your [GrafanaCloud][GrafanaCloud] credentials; you can find them in your [account][grafanacloud account] in the Loki instance page. If you're running your own Loki instance, replace the URL completely (for example, `http://my-loki.com:3100/loki/api/v1/push`). We include plain text credentials in `options` for simplicity. However, this exposes credentials in your ECS task definition and in any version-controlled configuration. Mitigate this issue by using a secret store such as [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html), combined with the `secretOptions` configuration option for [injecting sensitive data in a log configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html#secrets-logconfig). diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md index 7a04817f83aaf..68042664bb13b 100644 --- a/docs/sources/send-data/promtail/configuration.md +++ b/docs/sources/send-data/promtail/configuration.md @@ -66,10 +66,12 @@ ${VAR:-default_value} Where default_value is the value to use if the environment variable is undefined.
-**Note**: With `expand-env=true` the configuration will first run through +{{% admonition type="note" %}} +With `expand-env=true` the configuration will first run through [envsubst](https://pkg.go.dev/github.com/drone/envsubst) which will replace double backslashes with single backslashes. Because of this every use of a backslash `\` needs to -be replaced with a double backslash `\\` +be replaced with a double backslash `\\`. +{{% /admonition %}} ### Generic placeholders @@ -848,7 +850,9 @@ labels: [path: <string>] ``` -**Note**: priority label is available as both value and keyword. For example, if `priority` is `3` then the labels will be `__journal_priority` with a value `3` and `__journal_priority_keyword` with a corresponding keyword `err`. +{{% admonition type="note" %}} +The priority label is available as both value and keyword. For example, if `priority` is `3` then the labels will be `__journal_priority` with a value `3` and `__journal_priority_keyword` with a corresponding keyword `err`. +{{% /admonition %}} ### syslog diff --git a/docs/sources/send-data/promtail/logrotation/_index.md b/docs/sources/send-data/promtail/logrotation/_index.md index 8045adc05d5ff..f90941780a47d 100644 --- a/docs/sources/send-data/promtail/logrotation/_index.md +++ b/docs/sources/send-data/promtail/logrotation/_index.md @@ -18,7 +18,9 @@ At any point in time, there may be three processes working on a log file as show 2. Tailer - A reader that reads log lines as they are appended, for example, agents like Promtail. 3. Log Rotator - A process that rotates the log file either based on time (for example, scheduled every day) or size (for example, a log file reached its maximum size). -> **NOTE:** Here `fd` defines a file descriptor. Once a file is open for read or write, The Operating System returns a unique file descriptor (usually an integer) per process, and all the operations like read and write are done over that file descriptor. In other words, once the file is opened successfully, the file descriptor matters more than the file name. +{{% admonition type="note" %}} +Here `fd` denotes a file descriptor. Once a file is open for read or write, the operating system returns a unique file descriptor (usually an integer) per process, and all the operations like read and write are done over that file descriptor. In other words, once the file is opened successfully, the file descriptor matters more than the file name. +{{% /admonition %}} One of the critical components here is the log rotator. Let's understand how it impacts other components like the appender and tailer. @@ -96,7 +98,9 @@ You can [configure](https://kubernetes.io/docs/concepts/cluster-administration/l Both should be part of the `kubelet` config. If you run a managed version of Kubernetes in Cloud, refer to your cloud provider documentation for configuring `kubelet`. Examples [GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/node-system-config#create), [AKS](https://learn.microsoft.com/en-us/azure/aks/custom-node-configuration#use-custom-node-configuration) and [EKS](https://eksctl.io/usage/customizing-the-kubelet/#customizing-kubelet-configuration). -> **NOTE:** Log rotation managed by `kubelet` supports only rename + create and doesn't support copy + truncate. +{{% admonition type="note" %}} +Log rotation managed by `kubelet` supports only rename + create and doesn't support copy + truncate. +{{% /admonition %}} If `kubelet` is not configured to manage the log rotation, then it's up to the Container Runtime Interface (CRI) the cluster uses.
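The logrotation page above distinguishes rename + create from copy + truncate, and the earlier note explains why: once opened, the file descriptor matters more than the name. A minimal sketch of that behavior on POSIX systems, showing that a tailer's open descriptor survives a rename (error handling elided; file names are temporary and illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, _ := os.CreateTemp("", "app-*.log") // the "appender"
	defer f.Close()
	defer os.Remove(f.Name() + ".1")
	f.WriteString("before rotation\n")

	tail, _ := os.Open(f.Name()) // the "tailer" holds an fd to the inode
	defer tail.Close()

	// rename + create rotation: the name changes, open descriptors do not.
	os.Rename(f.Name(), f.Name()+".1")
	f.WriteString("after rotation\n") // appender keeps writing via its own fd

	buf := make([]byte, 64)
	n, _ := tail.Read(buf)
	fmt.Print(string(buf[:n])) // prints both lines: the fd still tracks the renamed file
}
```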
Alternatively, log rotation can be managed by the `logrotate` utility in the Kubernetes node itself. @@ -138,7 +142,9 @@ Example `/etc/docker/daemon.json`: If neither `kubelet` nor `CRI` is configured for rotating logs, then the `logrotate` utility can be used on the Kubernetes nodes as explained previously. -> **NOTE:** We recommend using kubelet for log rotation. +{{% admonition type="note" %}} +We recommend using kubelet for log rotation. +{{% /admonition %}} ## Configure Promtail diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index ede76840c8f6c..8252a6fd103a3 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -2261,6 +2261,27 @@ null "secretAccessKey": null, "signatureVersion": null }, + "swift": { + "auth_url": null, + "auth_version": null, + "connect_timeout": null, + "container_name": null, + "domain_id": null, + "domain_name": null, + "internal": null, + "max_retries": null, + "password": null, + "project_domain_id": null, + "project_domain_name": null, + "project_id": null, + "project_name": null, + "region_name": null, + "request_timeout": null, + "user_domain_id": null, + "user_domain_name": null, + "user_id": null, + "username": null + }, "type": "s3" } @@ -4531,6 +4552,15 @@ null
 []
 
+	<tr>
+		<td>write.extraContainers</td>
+		<td>list</td>
+		<td>Containers to add to the write pods</td>
+		<td><pre lang="json">
+[]
+</pre>
+</td>
+	</tr>
diff --git a/go.mod b/go.mod index 75c6f559393fd..78ddccab4b499 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/grafana/loki -go 1.20 +go 1.21 + +toolchain go1.21.3 require ( cloud.google.com/go/bigtable v1.18.1 diff --git a/go.sum b/go.sum index 42fd5c822b694..034356232c761 100644 --- a/go.sum +++ b/go.sum @@ -85,6 +85,7 @@ cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= @@ -236,6 +237,7 @@ github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g= github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/IBM/go-sdk-core/v5 v5.13.1 h1:zD6p3t1whAlRJo/VBmE69c8RcH9LCHL1n0/sO1MWlpw= github.com/IBM/go-sdk-core/v5 v5.13.1/go.mod h1:pVkN7IGmsSdmR1ZCU4E/cLcCclqRKMYgg7ya+O2Mk6g= github.com/IBM/ibm-cos-sdk-go v1.10.0 h1:/2VIev2/jBei39OqU2+nSZQnoWJ+KtkiSAIDkqsd7uU= @@ -261,6 +263,7 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= +github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -275,6 +278,7 @@ github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSd github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= +github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Workiva/go-datastructures v1.1.0 h1:hu20UpgZneBhQ3ZvwiOGlqJSKIosin2Rd5wAKUHEO/k= @@ -372,6 +376,7 @@ github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock 
v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -413,6 +418,7 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY= @@ -497,6 +503,7 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/ github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c71tQlGr9SeGrg= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -540,6 +547,7 @@ github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= github.com/efficientgo/e2e v0.13.1-0.20220922081603-45de9fc588a8 h1:UFLc39BcUXahSNCLUrKjNGZABMUZaS4M74EZvTRnq3k= +github.com/efficientgo/e2e v0.13.1-0.20220922081603-45de9fc588a8/go.mod h1:Hi+sz0REtlhVZ8zcdeTC3j6LUEEpJpPtNjOaOKuNcgI= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -565,6 +573,7 @@ github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6 github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= @@ -755,6 +764,7 @@ github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9F github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= @@ -765,11 +775,13 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= @@ -911,6 +923,7 @@ github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIG github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -1025,7 +1038,9 @@ github.com/hashicorp/consul/proto-public v0.2.1/go.mod h1:iWNlBDJIZQJC3bBiCThoqg github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= 
github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -1083,6 +1098,7 @@ github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -1101,6 +1117,7 @@ github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo= github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= @@ -1122,10 +1139,12 @@ github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQg github.com/heroku/x v0.0.61 h1:yfoAAtnFWSFZj+UlS+RZL/h8QYEp1R4wHVEg0G+Hwh4= github.com/heroku/x v0.0.61/go.mod h1:C7xYbpMdond+s6L5VpniDUSVPRwm3kZum1o7XiD5ZHk= github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ= +github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.1.3/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -1153,6 +1172,7 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP 
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w= github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= +github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= @@ -1229,6 +1249,7 @@ github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1238,6 +1259,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1261,6 +1283,7 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA= github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= +github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ= github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ= github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= @@ -1373,6 +1396,7 @@ github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= +github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/multiplay/go-ts3 v1.0.0/go.mod 
h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -1403,6 +1427,7 @@ github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2 github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw= github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= @@ -1418,14 +1443,18 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.86.0 h1:g7HlND105lwm7NW8JCxAfbpaFyk1WKcEUUVwchIo9zE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.86.0/go.mod h1:BTFCu+oeOnvPt/R6HQDW1S/duHuJcV5Xb0pbURCSMno= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0 h1:nnzuEQYlsRIkMPAw1jEl+8L2Is68QQl58QvY2dHHgDU= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.86.0/go.mod h1:prodbjWZpQkRcd45W2wkRaryv6JomuuWZUmM6mDj27k= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= @@ -1454,12 +1483,14 @@ github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2J github.com/oracle/oci-go-sdk v7.0.0+incompatible h1:oj5ESjXwwkFRdhZSnPlShvLWYdt/IZ65RQxveYM3maA= github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/oracle/oci-go-sdk/v65 v65.41.1 h1:+lbosOyNiib3TGJDvLq1HwEAuFqkOjPJDIkyxM15WdQ= +github.com/oracle/oci-go-sdk/v65 v65.41.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQmCOmbX5kjVEJodw= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod 
h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.11.0 h1:aSXMqYR/EPNjGE8epgqwDay+P30hCBZIveY0WZbAWh0= github.com/oschwald/maxminddb-golang v1.11.0/go.mod h1:YmVI+H0zh3ySFR3w+oz8PCfglAFj3PuCmui13+P9zDg= github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ= github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1570,6 +1601,7 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4tU1p9YHN4+suwV7M= github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= @@ -1589,6 +1621,7 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -1679,6 +1712,7 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVl github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= +github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 h1:avZFY25vRM35FggTBQj2WXq45yEvIKbDLUcNDrJLfKU= github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= @@ -1717,6 +1751,7 @@ github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59b github.com/vultr/govultr v0.1.4 h1:UnNMixYFVO0p80itc8PcweoVENyo1PasfvwKhoasR9U= github.com/vultr/govultr v0.1.4/go.mod 
h1:9H008Uxr/C4vFNGLqKx232C206GL0PBHzOP0809bGNA= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= @@ -2612,6 +2647,7 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/integration/client/client.go b/integration/client/client.go index 12b5cd11277c5..f293ad81ddb94 100644 --- a/integration/client/client.go +++ b/integration/client/client.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "net" "net/http" "net/url" "strconv" @@ -14,11 +15,17 @@ import ( "time" "github.com/buger/jsonparser" + "github.com/gorilla/websocket" "github.com/grafana/dskit/user" + "github.com/prometheus/common/config" "github.com/prometheus/prometheus/model/labels" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/plog/plogotlp" + + logcli "github.com/grafana/loki/pkg/logcli/client" + "github.com/grafana/loki/pkg/loghttp" + "github.com/grafana/loki/pkg/util/unmarshal" ) const requestTimeout = 30 * time.Second @@ -655,6 +662,46 @@ func (c *Client) Series(ctx context.Context, matcher string) ([]map[string]strin return values.Data, nil } +type TailResult struct { + Response loghttp.TailResponse + Err error +} + +func (c *Client) Tail(ctx context.Context, query string, out chan TailResult) (*websocket.Conn, error) { + client := &logcli.DefaultClient{ + Address: c.baseURL, + OrgID: c.instanceID, + TLSConfig: config.TLSConfig{}, + } + start := time.Now().Add(-1 * time.Hour) + + wc, err := client.LiveTailQueryConn(query, time.Duration(0), 100, start, false) + if err != nil { + return nil, err + } + + go func() { + + tailResponse := new(loghttp.TailResponse) + + for { + select { + case <-ctx.Done(): + close(out) + return + default: + err := unmarshal.ReadTailResponseJSON(tailResponse, wc) + if errors.Is(err, net.ErrClosed) { + close(out) + return + } + out <- TailResult{*tailResponse, err} + } + } + }() + return wc, nil +} + func (c *Client) request(ctx context.Context, method string, url string, extraHeaders ...Header) (*http.Request, error) { ctx = user.InjectOrgID(ctx, c.instanceID) req, err := http.NewRequestWithContext(ctx, method, url, nil) diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index 42a1ed4a3ddcc..d85a3ae4a2299 100644 --- a/integration/loki_micro_services_test.go +++ b/integration/loki_micro_services_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "strings" + "sync" "testing" "time" @@ -66,7 +67,19 @@ func 
TestMicroServicesIngestQuery(t *testing.T) { ) require.NoError(t, clu.Run()) - // finally, run the query-frontend and querier. + // then, run the querier. + var ( + tQuerier = clu.AddComponent( + "querier", + "-target=querier", + "-querier.scheduler-address="+tQueryScheduler.GRPCURL(), + "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + "-common.compactor-address="+tCompactor.HTTPURL(), + ) + ) + require.NoError(t, clu.Run()) + + // finally, run the query-frontend. var ( tQueryFrontend = clu.AddComponent( "query-frontend", @@ -76,13 +89,8 @@ func TestMicroServicesIngestQuery(t *testing.T) { "-common.compactor-address="+tCompactor.HTTPURL(), "-querier.per-request-limits-enabled=true", "-frontend.encoding=protobuf", - ) - _ = clu.AddComponent( - "querier", - "-target=querier", - "-querier.scheduler-address="+tQueryScheduler.GRPCURL(), - "-boltdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), - "-common.compactor-address="+tCompactor.HTTPURL(), + "-querier.shard-aggregations=quantile_over_time", + "-frontend.tail-proxy-url="+tQuerier.HTTPURL(), ) ) require.NoError(t, clu.Run()) @@ -146,6 +154,47 @@ func TestMicroServicesIngestQuery(t *testing.T) { _, err := cliQueryFrontendLimited.LabelNames(context.Background()) require.ErrorContains(t, err, "the query time range exceeds the limit (query length") }) + + t.Run("tail", func(t *testing.T) { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + out := make(chan client.TailResult) + wc, err := cliQueryFrontend.Tail(ctx, `{job="fake"}`, out) + require.NoError(t, err) + defer wc.Close() + + var lines []string + mu := sync.Mutex{} + done := make(chan struct{}) + go func() { + for resp := range out { + require.NoError(t, resp.Err) + for _, stream := range resp.Response.Streams { + for _, e := range stream.Entries { + mu.Lock() + lines = append(lines, e.Line) + mu.Unlock() + } + } + } + done <- struct{}{} + }() + assert.Eventually( + t, + func() bool { + mu.Lock() + defer mu.Unlock() + return len(lines) == 4 + }, + 10*time.Second, + 100*time.Millisecond, + ) + wc.Close() + cancelFunc() + <-done + assert.ElementsMatch(t, []string{"lineA", "lineB", "lineC", "lineD"}, lines) + }) } func TestMicroServicesIngestQueryWithSchemaChange(t *testing.T) { diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 51e990451e8d2..0978e7f4de52d 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,6 +1,7 @@ ## Main - [11330](https://github.com/grafana/loki/pull/11330) **JoaoBraveCoding**: Update loki-mixins, reworked retention dashboard, new structured metadata write dashboard +- [11393](https://github.com/grafana/loki/pull/11393) **periklis**: Add infra annotations for OpenShift-based deployments - [11094](https://github.com/grafana/loki/pull/11094) **periklis**: Add support for blocking queries per tenant - [11288](https://github.com/grafana/loki/pull/11288) **periklis**: Fix custom CA for object-store in ruler component - [11091](https://github.com/grafana/loki/pull/11091) **periklis**: Add automatic stream sharding support diff --git a/operator/Makefile b/operator/Makefile index 680b4f509ca3c..d77b26035e588 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -126,6 +126,12 @@ deps: go.mod go.sum go mod download go mod verify +.PHONY: deps-api +deps-api: apis/loki/go.mod apis/loki/go.sum + @cd ./apis/loki/ && go mod tidy + @cd ./apis/loki/ && go mod download + @cd ./apis/loki/ && go mod verify + .PHONY: cli cli: deps bin/loki-broker ## Build loki-broker
CLI binary bin/loki-broker: $(GO_FILES) | generate @@ -152,10 +158,14 @@ manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. $(CONTROLLER_GEN) rbac:roleName=lokistack-manager crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases .PHONY: test -test: deps generate go-generate lint lint-prometheus manifests ## Run tests +test: deps deps-api generate go-generate lint lint-prometheus manifests test-unit-api ## Run tests test: $(GO_FILES) go test ./... -coverprofile cover.out +.PHONY: test-unit-api +test-unit-api: $(GO_FILES) + @cd ./apis/loki/ && go test ./... -coverprofile cover.out + .PHONY: test-unit-prometheus test-unit-prometheus: $(PROMTOOL) ## Run prometheus unit tests @$(PROMTOOL) test rules ./internal/manifests/internal/alerts/testdata/test.yaml diff --git a/operator/apis/loki/go.sum b/operator/apis/loki/go.sum index e5ee53324670d..3180306f546d4 100644 --- a/operator/apis/loki/go.sum +++ b/operator/apis/loki/go.sum @@ -45,8 +45,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -55,11 +53,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/operator/apis/loki/v1beta1/lokistack_types_test.go b/operator/apis/loki/v1beta1/lokistack_types_test.go index 2ccaa2c2c02d7..54aa4091e9ac9 100644 --- a/operator/apis/loki/v1beta1/lokistack_types_test.go +++ b/operator/apis/loki/v1beta1/lokistack_types_test.go @@ -352,7 +352,9 @@ func TestConvertToV1_LokiStack(t *testing.T) { Name: "test", }, TLS: &v1.ObjectStorageTLSSpec{ - CA: "test-ca", + CASpec: v1.CASpec{ + CA: "test-ca", + }, }, }, StorageClassName: "standard", @@ -380,8 +382,6 @@ func TestConvertToV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, 
MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, QueryLimits: &v1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 1000, @@ -389,7 +389,7 @@ func TestConvertToV1_LokiStack(t *testing.T) { MaxQuerySeries: 10000, }, }, - Tenants: map[string]v1.LimitsTemplateSpec{ + Tenants: map[string]v1.PerTenantLimitsTemplateSpec{ "tenant-a": { IngestionLimits: &v1.IngestionLimitSpec{ IngestionRate: 100, @@ -399,13 +399,13 @@ func TestConvertToV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, - QueryLimits: &v1.QueryLimitSpec{ - MaxEntriesLimitPerQuery: 1000, - MaxChunksPerQuery: 1000, - MaxQuerySeries: 10000, + QueryLimits: &v1.PerTenantQueryLimitSpec{ + QueryLimitSpec: v1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 1000, + MaxChunksPerQuery: 1000, + MaxQuerySeries: 10000, + }, }, }, "tenant-b": { @@ -417,13 +417,13 @@ func TestConvertToV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, - QueryLimits: &v1.QueryLimitSpec{ - MaxEntriesLimitPerQuery: 1000, - MaxChunksPerQuery: 1000, - MaxQuerySeries: 10000, + QueryLimits: &v1.PerTenantQueryLimitSpec{ + QueryLimitSpec: v1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 1000, + MaxChunksPerQuery: 1000, + MaxQuerySeries: 10000, + }, }, }, }, @@ -683,7 +683,9 @@ func TestConvertFromV1_LokiStack(t *testing.T) { Name: "test", }, TLS: &v1.ObjectStorageTLSSpec{ - CA: "test-ca", + CASpec: v1.CASpec{ + CA: "test-ca", + }, }, }, StorageClassName: "standard", @@ -711,8 +713,6 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, QueryLimits: &v1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 1000, @@ -720,7 +720,7 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxQuerySeries: 10000, }, }, - Tenants: map[string]v1.LimitsTemplateSpec{ + Tenants: map[string]v1.PerTenantLimitsTemplateSpec{ "tenant-a": { IngestionLimits: &v1.IngestionLimitSpec{ IngestionRate: 100, @@ -730,13 +730,13 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, - QueryLimits: &v1.QueryLimitSpec{ - MaxEntriesLimitPerQuery: 1000, - MaxChunksPerQuery: 1000, - MaxQuerySeries: 10000, + QueryLimits: &v1.PerTenantQueryLimitSpec{ + QueryLimitSpec: v1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 1000, + MaxChunksPerQuery: 1000, + MaxQuerySeries: 10000, + }, }, }, "tenant-b": { @@ -748,13 +748,13 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, - QueryLimits: &v1.QueryLimitSpec{ - MaxEntriesLimitPerQuery: 1000, - MaxChunksPerQuery: 1000, - MaxQuerySeries: 10000, + QueryLimits: &v1.PerTenantQueryLimitSpec{ + QueryLimitSpec: v1.QueryLimitSpec{ + MaxEntriesLimitPerQuery: 1000, + MaxChunksPerQuery: 1000, + MaxQuerySeries: 10000, + }, }, }, }, @@ -1014,8 +1014,6 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, QueryLimits: &v1beta1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 
1000, @@ -1033,8 +1031,6 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, QueryLimits: &v1beta1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 1000, @@ -1051,8 +1047,6 @@ func TestConvertFromV1_LokiStack(t *testing.T) { MaxLabelNamesPerSeries: 1000, MaxGlobalStreamsPerTenant: 10000, MaxLineSize: 512, - PerStreamRateLimit: 10, - PerStreamRateLimitBurst: 20, }, QueryLimits: &v1beta1.QueryLimitSpec{ MaxEntriesLimitPerQuery: 1000, diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index f025ccc751d08..a315fd044f750 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,9 +150,16 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.5.0 - createdAt: "2023-11-03T11:44:16Z" + createdAt: "2023-12-06T06:30:12Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "true" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" operators.operatorframework.io/builder: operator-sdk-unknown operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/grafana/loki/tree/main/operator diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index f1724fe77d8a3..dc3ab04f245b0 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.5.0 - createdAt: "2023-11-03T11:44:14Z" + createdAt: "2023-12-06T06:30:10Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index 90bdb078cbf35..71f096c4fd269 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2023-11-03T11:44:18Z" + createdAt: "2023-12-06T06:30:15Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. 
## Prerequisites and Requirements @@ -160,6 +160,13 @@ metadata: Loki is a memory intensive application. The initial set of OCP nodes may not be large enough to support the Loki stack. Additional OCP nodes must be added to the OCP cluster if you desire to run with the recommended (or better) memory. + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "true" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" olm.skipRange: '>=5.7.0-0 <5.9.0' operatorframework.io/cluster-monitoring: "true" operatorframework.io/suggested-namespace: openshift-operators-redhat diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml index b171014a40694..c7eb60e5a3e3b 100644 --- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml @@ -10,6 +10,13 @@ metadata: createdAt: "2022-12-22T13:28:40+00:00" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "true" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" repository: https://github.com/grafana/loki/tree/main/operator support: Grafana Loki SIG Operator labels: diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml index 20c923c23cc8b..5483709ad5d66 100644 --- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml @@ -16,6 +16,13 @@ metadata: Loki is a memory intensive application. The initial set of OCP nodes may not be large enough to support the Loki stack. Additional OCP nodes must be added to the OCP cluster if you desire to run with the recommended (or better) memory. 
+ features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "true" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" olm.skipRange: '>=5.7.0-0 <5.9.0' operatorframework.io/cluster-monitoring: "true" operatorframework.io/suggested-namespace: openshift-operators-redhat diff --git a/operator/hack/deploy-azure-storage-secret.sh b/operator/hack/deploy-azure-storage-secret.sh new file mode 100755 index 0000000000000..bf99c66aae1d3 --- /dev/null +++ b/operator/hack/deploy-azure-storage-secret.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -euo pipefail + +readonly account_name="${1-}" +readonly container_name="${2-}" + +if [[ -z "${account_name}" ]]; then + echo "Provide an account name" + exit 1 +fi + +if [[ -z "${container_name}" ]]; then + echo "Provide a container name" + exit 1 +fi + +readonly namespace="${NAMESPACE:-openshift-logging}" + +readonly azure_environment="AzureGlobal" + +resource_group=$(az storage account show --name "${account_name}" | jq -r '.resourceGroup') +readonly resource_group + +account_key=$(az storage account keys list --resource-group "${resource_group}" --account-name "${account_name}" | jq -r '.[0].value') +readonly account_key + +kubectl --ignore-not-found=true -n "${namespace}" delete secret test +kubectl -n "${namespace}" create secret generic test \ + --from-literal=environment="$(echo -n "${azure_environment}")" \ + --from-literal=account_name="$(echo -n "${account_name}")" \ + --from-literal=account_key="$(echo -n "${account_key}")" \ + --from-literal=container="$(echo -n "${container_name}")" diff --git a/operator/hack/lokistack_gateway_ocp_azure.yaml b/operator/hack/lokistack_gateway_ocp_azure.yaml new file mode 100644 index 0000000000000..3e38ef5b68a7f --- /dev/null +++ b/operator/hack/lokistack_gateway_ocp_azure.yaml @@ -0,0 +1,25 @@ +apiVersion: loki.grafana.com/v1 +kind: LokiStack +metadata: + name: lokistack-dev + namespace: openshift-logging +spec: + size: 1x.demo + storage: + schemas: + - version: v13 + effectiveDate: 2023-10-15 + secret: + name: test + type: azure + storageClassName: managed-csi + tenants: + mode: openshift-logging + rules: + enabled: true + selector: + matchLabels: + openshift.io/cluster-monitoring: "true" + namespaceSelector: + matchLabels: + openshift.io/cluster-monitoring: "true" diff --git a/pkg/bloomcompactor/TODO.md b/pkg/bloomcompactor/TODO.md index 2d963841b854c..865ec542b4e67 100644 --- a/pkg/bloomcompactor/TODO.md +++ b/pkg/bloomcompactor/TODO.md @@ -1,2 +1,3 @@ * Use tarGz, untarGz before uploading blocks to storage * Introduce back `maxLookBackPeriod` as `RejectOldSamplesMaxAge` limit in distributors +* Restrict block size during creation. Suggestion: use the cut logic we use for memchunks.
Suggested start size is 500MB \ No newline at end of file diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index 9eb7ed11ca5db..bead6f79c320a 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -29,8 +29,6 @@ import ( "fmt" "math" "os" - "path/filepath" - "sort" "time" "github.com/go-kit/log" @@ -44,12 +42,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" - "github.com/grafana/loki/pkg/compactor/retention" - "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/storage" v1 "github.com/grafana/loki/pkg/storage/bloom/v1" - "github.com/grafana/loki/pkg/storage/bloom/v1/filter" - "github.com/grafana/loki/pkg/storage/chunk" chunk_client "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" @@ -61,12 +55,6 @@ import ( "github.com/grafana/loki/pkg/util" ) -// TODO: Make a constants file somewhere -const ( - bloomFileName = "bloom" - seriesFileName = "series" -) - type Compactor struct { services.Service @@ -354,46 +342,51 @@ func (c *Compactor) compactTenant(ctx context.Context, logger log.Logger, sc sto NGramSkip := c.limits.BloomNGramSkip(tenant) bt, _ := v1.NewBloomTokenizer(c.reg, NGramLength, NGramSkip) - // TODO: Use ForEachConcurrent? errs := multierror.New() if err := sc.indexShipper.ForEach(ctx, tableName, tenant, func(isMultiTenantIndex bool, idx shipperindex.Index) error { - if isMultiTenantIndex { + if isMultiTenantIndex { // TODO: handle multitenant tables return fmt.Errorf("unexpected multi-tenant") } + var seriesMetas []seriesMeta // TODO: Make these casts safe if err := idx.(*tsdb.TSDBFile).Index.(*tsdb.TSDBIndex).ForSeries( ctx, nil, 0, math.MaxInt64, // TODO: Replace with MaxLookBackPeriod func(labels labels.Labels, fingerprint model.Fingerprint, chksMetas []tsdbindex.ChunkMeta) { - job := NewJob(tenant, tableName, idx.Path(), fingerprint, labels, chksMetas) - jobLogger := log.With(logger, "job", job.String()) + // TODO: Inefficient as is, calls the ring per fingerprint. Refactor to make the call once per compaction fingerprint bounds.
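Editor's sketch, not part of this patch: the TODO above could be addressed by building the tenant's fingerprint sharding once per compaction run and reusing it for every series, instead of resolving the subring inside OwnsFingerprint on every call. A minimal sketch using only identifiers that already exist in pkg/bloomcompactor/sharding.go in this patch; ownershipCheckerFor is a hypothetical helper name:

func (s *ShuffleShardingStrategy) ownershipCheckerFor(tenantID string) func(fp uint64) (bool, error) {
	// Resolve the tenant subring and fingerprint sharding once per run.
	tenantRing := s.GetTenantSubRing(tenantID)
	fpSharding := util_ring.NewFingerprintShuffleSharding(tenantRing, s.ringLifeCycler, RingOp)
	// The returned closure only performs the per-fingerprint ownership lookup.
	return func(fp uint64) (bool, error) {
		return fpSharding.OwnsFingerprint(fp)
	}
}

The ForSeries callback below would call this closure instead of c.sharding.OwnsFingerprint, so the subring is resolved once per tenant rather than once per series.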
+ ownsFingerprint, err := c.sharding.OwnsFingerprint(tenant, uint64(fingerprint)) - ownsJob, err := c.sharding.OwnsJob(job) if err != nil { - c.metrics.compactionRunUnownedJobs.Inc() - level.Error(jobLogger).Log("msg", "failed to check if compactor owns job", "err", err) + level.Error(logger).Log("msg", "failed to check if compactor owns fingerprint", "err", err) errs.Add(err) return } - if !ownsJob { - c.metrics.compactionRunUnownedJobs.Inc() - level.Debug(jobLogger).Log("msg", "skipping job because it is not owned by this shard") - return - } - - if err := c.runCompact(ctx, jobLogger, job, c.bloomShipperClient, bt, sc); err != nil { - c.metrics.compactionRunFailedJobs.Inc() - errs.Add(errors.Wrap(err, "runBloomCompact")) + if !ownsFingerprint { return } - c.metrics.compactionRunSucceededJobs.Inc() + temp := make([]tsdbindex.ChunkMeta, len(chksMetas)) + _ = copy(temp, chksMetas) + // All seriesMetas for this table whose fingerprints are owned by this compactor shard + seriesMetas = append(seriesMetas, seriesMeta{seriesFP: fingerprint, seriesLbs: labels, chunkRefs: temp}) }, ); err != nil { errs.Add(err) } + job := NewJob(tenant, tableName, idx.Path(), seriesMetas) + jobLogger := log.With(logger, "job", job.String()) + c.metrics.compactionRunJobStarted.Inc() + + if err := c.runCompact(ctx, jobLogger, job, bt, sc); err != nil { + c.metrics.compactionRunJobFailed.Inc() + errs.Add(errors.Wrap(err, "runBloomCompact failed")) + return errs.Err() + } + + c.metrics.compactionRunJobSuceeded.Inc() + return nil }); err != nil { errs.Add(err) @@ -440,135 +433,15 @@ func (c *Compactor) compactTenantWithRetries(ctx context.Context, logger log.Log ) } -func makeChunkRefs(chksMetas []tsdbindex.ChunkMeta, tenant string, fp model.Fingerprint) []chunk.Chunk { - chunkRefs := make([]chunk.Chunk, 0, len(chksMetas)) - for _, chk := range chksMetas { - chunkRefs = append(chunkRefs, chunk.Chunk{ - ChunkRef: logproto.ChunkRef{ - Fingerprint: uint64(fp), - UserID: tenant, - From: chk.From(), - Through: chk.Through(), - Checksum: chk.Checksum, - }, - }) - } - - return chunkRefs -} - -// TODO Revisit this step once v1/bloom lib updated to combine blooms in the same series -func buildBloomBlock( - ctx context.Context, - logger log.Logger, - options v1.BlockOptions, - bloomForChks v1.SeriesWithBloom, - job Job, - workingDir string, -) (bloomshipper.Block, error) { - // Ensure the context has not been canceled (ie. compactor shutdown has been triggered).
- if err := ctx.Err(); err != nil { - return bloomshipper.Block{}, err - } - - localDst := createLocalDirName(workingDir, job) - - // write bloom to a local dir - builder, err := v1.NewBlockBuilder(options, v1.NewDirectoryBlockWriter(localDst)) - if err != nil { - level.Error(logger).Log("creating builder", err) - return bloomshipper.Block{}, err - } - - checksum, err := builder.BuildFrom(v1.NewSliceIter([]v1.SeriesWithBloom{bloomForChks})) - if err != nil { - level.Error(logger).Log("writing bloom", err) - return bloomshipper.Block{}, err - } - - blockFile, err := os.Open(filepath.Join(localDst, bloomFileName)) - if err != nil { - level.Error(logger).Log("reading bloomBlock", err) - } - - blocks := bloomshipper.Block{ - BlockRef: bloomshipper.BlockRef{ - Ref: bloomshipper.Ref{ - TenantID: job.Tenant(), - TableName: job.TableName(), - MinFingerprint: uint64(job.Fingerprint()), // TODO will change once we compact multiple blooms into a block - MaxFingerprint: uint64(job.Fingerprint()), - StartTimestamp: job.From().Unix(), - EndTimestamp: job.Through().Unix(), - Checksum: checksum, - }, - IndexPath: job.IndexPath(), - }, - Data: blockFile, - } - - return blocks, nil -} - -func createLocalDirName(workingDir string, job Job) string { - dir := fmt.Sprintf("bloomBlock-%s-%s-%s-%s-%s-%s", job.TableName(), job.Tenant(), job.Fingerprint(), job.Fingerprint(), job.From(), job.Through()) - return filepath.Join(workingDir, dir) -} - -// Compacts given list of chunks, uploads them to storage and returns a list of bloomBlocks -func CompactNewChunks( - ctx context.Context, - logger log.Logger, - job Job, - chunks []chunk.Chunk, - bt *v1.BloomTokenizer, - fpRate float64, - bloomShipperClient bloomshipper.Client, - dst string, -) ([]bloomshipper.Block, error) { - // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). - if err := ctx.Err(); err != nil { - return nil, err - } - - // Create a bloom for this series - bloomForChks := v1.SeriesWithBloom{ - Series: &v1.Series{ - Fingerprint: job.Fingerprint(), - }, - Bloom: &v1.Bloom{ - ScalableBloomFilter: *filter.NewDefaultScalableBloomFilter(fpRate), - }, - } - - // Tokenize data into n-grams - bt.PopulateSeriesWithBloom(&bloomForChks, chunks) - - // Build and upload bloomBlock to storage - blockOptions := v1.NewBlockOptions(bt.GetNGramLength(), bt.GetNGramSkip()) - blocks, err := buildBloomBlock(ctx, logger, blockOptions, bloomForChks, job, dst) - if err != nil { - level.Error(logger).Log("building bloomBlocks", err) - return nil, err - } - storedBlocks, err := bloomShipperClient.PutBlocks(ctx, []bloomshipper.Block{blocks}) - if err != nil { - level.Error(logger).Log("putting blocks to storage", err) - return nil, err - } - return storedBlocks, nil -} - -func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, bloomShipperClient bloomshipper.Client, bt *v1.BloomTokenizer, storeClient storeClient) error { +func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, bt *v1.BloomTokenizer, storeClient storeClient) error { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). 
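// (Editor's summary, added for readability: the rewritten runCompact below first
// searches existing metas covering the job's [minFp, maxFp] fingerprint range;
// when none exist, it builds one bloom per series, cuts a single block through
// the persistent builder, uploads it via PutBlocks, and finally records the
// block refs and tombstones in a new meta.json via PutMeta.)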
if err := ctx.Err(); err != nil { return err } - metaSearchParams := bloomshipper.MetaSearchParams{ TenantID: job.tenantID, - MinFingerprint: uint64(job.seriesFP), - MaxFingerprint: uint64(job.seriesFP), + MinFingerprint: uint64(job.minFp), + MaxFingerprint: uint64(job.maxFp), StartTimestamp: int64(job.from), EndTimestamp: int64(job.through), } @@ -577,35 +450,56 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, var bloomBlocksRefs []bloomshipper.BlockRef var tombstonedBlockRefs []bloomshipper.BlockRef - metas, err := bloomShipperClient.GetMetas(ctx, metaSearchParams) + metas, err := c.bloomShipperClient.GetMetas(ctx, metaSearchParams) if err != nil { return err } if len(metas) == 0 { - // Get chunks data from list of chunkRefs - chks, err := storeClient.chunk.GetChunks(ctx, makeChunkRefs(job.Chunks(), job.Tenant(), job.Fingerprint())) + localDst := createLocalDirName(c.cfg.WorkingDirectory, job) + defer func() { + // clean up the bloom directory + if err := os.RemoveAll(localDst); err != nil { + level.Error(logger).Log("msg", "failed to remove block directory", "dir", localDst, "err", err) + } + }() + + blockOptions := v1.NewBlockOptions(bt.GetNGramLength(), bt.GetNGramSkip()) + builder, err := NewPersistentBlockBuilder(localDst, blockOptions) if err != nil { + level.Error(logger).Log("msg", "creating block builder", "err", err) return err } - fpRate := c.limits.BloomFalsePositiveRate(job.Tenant()) - storedBlocks, err := CompactNewChunks(ctx, logger, job, chks, bt, fpRate, bloomShipperClient, c.cfg.WorkingDirectory) + fpRate := c.limits.BloomFalsePositiveRate(job.tenantID) + storedBlock, err := compactNewChunks(ctx, logger, job, fpRate, bt, storeClient.chunk, builder) if err != nil { - return level.Error(logger).Log("compacting new chunks", err) + return level.Error(logger).Log("msg", "failed to compact new chunks", "err", err) } - storedBlockRefs := make([]bloomshipper.BlockRef, len(storedBlocks)) - - for i, block := range storedBlocks { - storedBlockRefs[i] = block.BlockRef + // Do not change the signature of PutBlocks yet. + // Once block size is limited, compactNewChunks will potentially return multiple blocks, hence a list is appropriate. + storedBlocks, err := c.bloomShipperClient.PutBlocks(ctx, []bloomshipper.Block{storedBlock}) + if err != nil { + level.Error(logger).Log("msg", "putting blocks to storage", "err", err) + return err } // all blocks are new and active blocks - bloomBlocksRefs = storedBlockRefs + for _, block := range storedBlocks { + bloomBlocksRefs = append(bloomBlocksRefs, block.BlockRef) + } } else { // TODO complete part 2 - periodic compaction for delta from previous period // When already compacted metas exist + + // Take the seriesFP, query the org_chunks from storage and query the blooms.
+ // compare the checksums of the indexes + // if they match - all good nothing to do + //else { + //get all chunks + //} + // Deduplicate index paths uniqueIndexPaths := make(map[string]struct{}) @@ -626,37 +520,10 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, Tombstones: tombstonedBlockRefs, Blocks: bloomBlocksRefs, } - err = bloomShipperClient.PutMeta(ctx, meta) + err = c.bloomShipperClient.PutMeta(ctx, meta) if err != nil { - level.Error(logger).Log("putting meta.json to storage", err) + level.Error(logger).Log("msg", "putting meta.json to storage", "err", err) return err } return nil } - -func getIntervalsForTables(tables []string) map[string]model.Interval { - tablesIntervals := make(map[string]model.Interval, len(tables)) - for _, table := range tables { - tablesIntervals[table] = retention.ExtractIntervalFromTableName(table) - } - - return tablesIntervals -} - -func sortTablesByRange(tables []string, intervals map[string]model.Interval) { - sort.Slice(tables, func(i, j int) bool { - // less than if start time is after produces a most recent first sort order - return intervals[tables[i]].Start.After(intervals[tables[j]].Start) - }) -} - -// TODO: comes from pkg/compactor/compactor.go -func schemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.PeriodConfig, bool) { - tableInterval := retention.ExtractIntervalFromTableName(tableName) - schemaCfg, err := cfg.SchemaForTime(tableInterval.Start) - if err != nil || schemaCfg.IndexTables.TableFor(tableInterval.Start) != tableName { - return config.PeriodConfig{}, false - } - - return schemaCfg, true -} diff --git a/pkg/bloomcompactor/chunkcompactor.go b/pkg/bloomcompactor/chunkcompactor.go new file mode 100644 index 0000000000000..ed7420a6aa1b4 --- /dev/null +++ b/pkg/bloomcompactor/chunkcompactor.go @@ -0,0 +1,184 @@ +package bloomcompactor + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/logproto" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/bloom/v1/filter" + "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" + tsdbindex "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index" +) + +type compactorTokenizer interface { + PopulateSeriesWithBloom(bloom *v1.SeriesWithBloom, chunks []chunk.Chunk) error +} + +type chunkClient interface { + // TODO: Consider using lazyChunks to avoid downloading all requested chunks. 
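// (Editor's note, not in the original patch: "lazyChunks" here presumably refers
// to deferring the chunk data download until a chunk is actually read, the
// pattern Loki's querier uses, whereas GetChunks below materializes every
// requested chunk of a series before its bloom is built.)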
+ GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) +} + +type blockBuilder interface { + BuildFrom(itr v1.Iterator[v1.SeriesWithBloom]) (uint32, error) + Data() (io.ReadCloser, error) +} + +type PersistentBlockBuilder struct { + builder *v1.BlockBuilder + localDst string +} + +func NewPersistentBlockBuilder(localDst string, blockOptions v1.BlockOptions) (*PersistentBlockBuilder, error) { + // write bloom to a local dir + b, err := v1.NewBlockBuilder(blockOptions, v1.NewDirectoryBlockWriter(localDst)) + if err != nil { + return nil, err + } + builder := PersistentBlockBuilder{ + builder: b, + localDst: localDst, + } + return &builder, nil +} + +func (p *PersistentBlockBuilder) BuildFrom(itr v1.Iterator[v1.SeriesWithBloom]) (uint32, error) { + return p.builder.BuildFrom(itr) +} + +func (p *PersistentBlockBuilder) Data() (io.ReadCloser, error) { + blockFile, err := os.Open(filepath.Join(p.localDst, v1.BloomFileName)) + if err != nil { + return nil, err + } + return blockFile, nil +} + +func makeChunkRefs(chksMetas []tsdbindex.ChunkMeta, tenant string, fp model.Fingerprint) []chunk.Chunk { + chunkRefs := make([]chunk.Chunk, 0, len(chksMetas)) + for _, chk := range chksMetas { + chunkRefs = append(chunkRefs, chunk.Chunk{ + ChunkRef: logproto.ChunkRef{ + Fingerprint: uint64(fp), + UserID: tenant, + From: chk.From(), + Through: chk.Through(), + Checksum: chk.Checksum, + }, + }) + } + + return chunkRefs +} + +func buildBloomFromSeries(seriesMeta seriesMeta, fpRate float64, tokenizer compactorTokenizer, chunks []chunk.Chunk) v1.SeriesWithBloom { + // Create a bloom for this series + bloomForChks := v1.SeriesWithBloom{ + Series: &v1.Series{ + Fingerprint: seriesMeta.seriesFP, + }, + Bloom: &v1.Bloom{ + ScalableBloomFilter: *filter.NewDefaultScalableBloomFilter(fpRate), + }, + } + + // Tokenize data into n-grams + _ = tokenizer.PopulateSeriesWithBloom(&bloomForChks, chunks) + return bloomForChks +} + +// TODO Test this when bloom block size check is implemented +func buildBlockFromBlooms( + ctx context.Context, + logger log.Logger, + builder blockBuilder, + blooms []v1.SeriesWithBloom, + job Job, +) (bloomshipper.Block, error) { + // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). 
+ if err := ctx.Err(); err != nil { + return bloomshipper.Block{}, err + } + + checksum, err := builder.BuildFrom(v1.NewSliceIter(blooms)) + if err != nil { + level.Error(logger).Log("msg", "failed writing to bloom", "err", err) + return bloomshipper.Block{}, err + } + + data, err := builder.Data() + if err != nil { + level.Error(logger).Log("msg", "failed reading bloom data", "err", err) + return bloomshipper.Block{}, err + } + + block := bloomshipper.Block{ + BlockRef: bloomshipper.BlockRef{ + Ref: bloomshipper.Ref{ + TenantID: job.tenantID, + TableName: job.tableName, + MinFingerprint: uint64(job.minFp), + MaxFingerprint: uint64(job.maxFp), + StartTimestamp: int64(job.from), + EndTimestamp: int64(job.through), + Checksum: checksum, + }, + IndexPath: job.indexPath, + }, + Data: data, + } + + return block, nil +} + +func createLocalDirName(workingDir string, job Job) string { + dir := fmt.Sprintf("bloomBlock-%s-%s-%s-%s-%s-%s", job.tableName, job.tenantID, job.minFp, job.maxFp, job.from, job.through) + return filepath.Join(workingDir, dir) +} + +// Compacts given list of chunks, uploads them to storage and returns a list of bloomBlocks +func compactNewChunks( + ctx context.Context, + logger log.Logger, + job Job, + fpRate float64, + bt compactorTokenizer, + storeClient chunkClient, + builder blockBuilder, +) (bloomshipper.Block, error) { + // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). + if err := ctx.Err(); err != nil { + return bloomshipper.Block{}, err + } + + blooms := make([]v1.SeriesWithBloom, len(job.seriesMetas)) + + for _, seriesMeta := range job.seriesMetas { + // Get chunks data from list of chunkRefs + chks, err := storeClient.GetChunks(ctx, makeChunkRefs(seriesMeta.chunkRefs, job.tenantID, seriesMeta.seriesFP)) + if err != nil { + return bloomshipper.Block{}, err + } + + bloom := buildBloomFromSeries(seriesMeta, fpRate, bt, chks) + blooms = append(blooms, bloom) + } + + // Build and upload bloomBlock to storage + block, err := buildBlockFromBlooms(ctx, logger, builder, blooms, job) + if err != nil { + level.Error(logger).Log("msg", "building bloomBlocks", "err", err) + return bloomshipper.Block{}, err + } + + return block, nil +} diff --git a/pkg/bloomcompactor/chunkcompactor_test.go b/pkg/bloomcompactor/chunkcompactor_test.go new file mode 100644 index 0000000000000..cfe6497089d66 --- /dev/null +++ b/pkg/bloomcompactor/chunkcompactor_test.go @@ -0,0 +1,152 @@ +package bloomcompactor + +import ( + "context" + "io" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/chunkenc" + "github.com/grafana/loki/pkg/push" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/chunk" + "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index" +) + +var ( + userID = "userID" + fpRate = 0.01 + + from = model.Earliest + to = model.Latest + + table = "test_table" + indexPath = "index_test_table" + + testBlockSize = 256 * 1024 + testTargetSize = 1500 * 1024 +) + +func createTestChunk(fp model.Fingerprint, lb labels.Labels) chunk.Chunk { + memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), testBlockSize, testTargetSize) + if err := memChunk.Append(&push.Entry{ + Timestamp: time.Unix(0, 1), + Line: "this is a log line", + }); err != nil { + panic(err) + } + c := 
chunk.NewChunk(userID, + fp, lb, chunkenc.NewFacade(memChunk, testBlockSize, testTargetSize), from, to) + + return c +} + +// Given a seriesMeta and corresponding chunks verify SeriesWithBloom can be built +func TestChunkCompactor_BuildBloomFromSeries(t *testing.T) { + label := labels.FromStrings("foo", "bar") + fp := model.Fingerprint(label.Hash()) + seriesMeta := seriesMeta{ + seriesFP: fp, + seriesLbs: label, + } + + chunks := []chunk.Chunk{createTestChunk(fp, label)} + + mbt := mockBloomTokenizer{} + bloom := buildBloomFromSeries(seriesMeta, fpRate, &mbt, chunks) + require.Equal(t, seriesMeta.seriesFP, bloom.Series.Fingerprint) + require.Equal(t, chunks, mbt.chunks) +} + +func TestChunkCompactor_CompactNewChunks(t *testing.T) { + // Setup + logger := log.NewNopLogger() + label := labels.FromStrings("foo", "bar") + fp1 := model.Fingerprint(100) + fp2 := model.Fingerprint(999) + fp3 := model.Fingerprint(200) + + chunkRef1 := index.ChunkMeta{ + Checksum: 1, + MinTime: 1, + MaxTime: 99, + } + + chunkRef2 := index.ChunkMeta{ + Checksum: 2, + MinTime: 10, + MaxTime: 999, + } + + seriesMetas := []seriesMeta{ + { + seriesFP: fp1, + seriesLbs: label, + chunkRefs: []index.ChunkMeta{chunkRef1}, + }, + { + seriesFP: fp2, + seriesLbs: label, + chunkRefs: []index.ChunkMeta{chunkRef1, chunkRef2}, + }, + { + seriesFP: fp3, + seriesLbs: label, + chunkRefs: []index.ChunkMeta{chunkRef1, chunkRef1, chunkRef2}, + }, + } + + job := NewJob(userID, table, indexPath, seriesMetas) + + mbt := mockBloomTokenizer{} + mcc := mockChunkClient{} + pbb := mockPersistentBlockBuilder{} + + // Run Compaction + compactedBlock, err := compactNewChunks(context.Background(), logger, job, fpRate, &mbt, &mcc, &pbb) + + // Validate Compaction Succeeds + require.NoError(t, err) + require.NotNil(t, compactedBlock) + + // Validate Compacted Block has expected data + require.Equal(t, job.tenantID, compactedBlock.TenantID) + require.Equal(t, job.tableName, compactedBlock.TableName) + require.Equal(t, uint64(fp1), compactedBlock.MinFingerprint) + require.Equal(t, uint64(fp2), compactedBlock.MaxFingerprint) + require.Equal(t, chunkRef1.MinTime, compactedBlock.StartTimestamp) + require.Equal(t, chunkRef2.MaxTime, compactedBlock.EndTimestamp) + require.Equal(t, indexPath, compactedBlock.IndexPath) +} + +type mockBloomTokenizer struct { + chunks []chunk.Chunk +} + +func (mbt *mockBloomTokenizer) PopulateSeriesWithBloom(_ *v1.SeriesWithBloom, c []chunk.Chunk) error { + mbt.chunks = c + return nil +} + +type mockChunkClient struct{} + +func (mcc *mockChunkClient) GetChunks(_ context.Context, _ []chunk.Chunk) ([]chunk.Chunk, error) { + return nil, nil +} + +type mockPersistentBlockBuilder struct { +} + +func (pbb *mockPersistentBlockBuilder) BuildFrom(_ v1.Iterator[v1.SeriesWithBloom]) (uint32, error) { + return 0, nil +} + +func (pbb *mockPersistentBlockBuilder) Data() (io.ReadCloser, error) { + return nil, nil +} diff --git a/pkg/bloomcompactor/job.go b/pkg/bloomcompactor/job.go index 3084b7db7c34b..bd43293c73cb6 100644 --- a/pkg/bloomcompactor/job.go +++ b/pkg/bloomcompactor/job.go @@ -1,20 +1,27 @@ package bloomcompactor import ( + "math" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) +type seriesMeta struct { + seriesFP model.Fingerprint + seriesLbs labels.Labels + chunkRefs []index.ChunkMeta +} + type Job struct { tableName, tenantID, indexPath string - seriesLbs labels.Labels - seriesFP model.Fingerprint - chunks 
[]index.ChunkMeta + seriesMetas []seriesMeta // We compute them lazily. Unset value is 0. from, through model.Time + minFp, maxFp model.Fingerprint } // NewJob returns a new compaction Job. @@ -22,80 +29,57 @@ func NewJob( tenantID string, tableName string, indexPath string, - seriesFP model.Fingerprint, - seriesLbs labels.Labels, - chunks []index.ChunkMeta, + seriesMetas []seriesMeta, ) Job { - return Job{ - tenantID: tenantID, - tableName: tableName, - indexPath: indexPath, - seriesFP: seriesFP, - seriesLbs: seriesLbs, - chunks: chunks, + j := Job{ + tenantID: tenantID, + tableName: tableName, + indexPath: indexPath, + seriesMetas: seriesMetas, } + j.computeBounds() + return j } func (j *Job) String() string { - return j.tableName + "_" + j.tenantID + "_" + j.seriesFP.String() -} - -func (j *Job) TableName() string { - return j.tableName -} - -func (j *Job) Tenant() string { - return j.tenantID -} - -func (j *Job) Fingerprint() model.Fingerprint { - return j.seriesFP -} - -func (j *Job) Chunks() []index.ChunkMeta { - return j.chunks -} - -func (j *Job) Labels() labels.Labels { - return j.seriesLbs -} - -func (j *Job) IndexPath() string { - return j.indexPath -} - -func (j *Job) From() model.Time { - if j.from == 0 { - j.computeFromThrough() - } - return j.from + return j.tableName + "_" + j.tenantID + "_" } -func (j *Job) Through() model.Time { - if j.through == 0 { - j.computeFromThrough() - } - return j.through -} - -func (j *Job) computeFromThrough() { - if len(j.chunks) == 0 { +func (j *Job) computeBounds() { + if len(j.seriesMetas) == 0 { return } minFrom := model.Latest maxThrough := model.Earliest - for _, chunk := range j.chunks { - from, through := chunk.Bounds() - if minFrom > from { - minFrom = from + minFp := model.Fingerprint(math.MaxInt64) + maxFp := model.Fingerprint(0) + + for _, seriesMeta := range j.seriesMetas { + // calculate timestamp boundaries + for _, chunkRef := range seriesMeta.chunkRefs { + from, through := chunkRef.Bounds() + if minFrom > from { + minFrom = from + } + if maxThrough < through { + maxThrough = through + } } - if maxThrough < through { - maxThrough = through + + // calculate fingerprint boundaries + if minFp > seriesMeta.seriesFP { + minFp = seriesMeta.seriesFP + } + if maxFp < seriesMeta.seriesFP { + maxFp = seriesMeta.seriesFP } } j.from = minFrom j.through = maxThrough + + j.minFp = minFp + j.maxFp = maxFp } diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go index 9baa7128d25e1..c043b8103c31d 100644 --- a/pkg/bloomcompactor/metrics.go +++ b/pkg/bloomcompactor/metrics.go @@ -18,9 +18,9 @@ type metrics struct { compactionRunSkippedTenants prometheus.Counter compactionRunSucceededTenants prometheus.Counter compactionRunFailedTenants prometheus.Counter - compactionRunUnownedJobs prometheus.Counter - compactionRunSucceededJobs prometheus.Counter - compactionRunFailedJobs prometheus.Counter + compactionRunJobStarted prometheus.Counter + compactionRunJobSuceeded prometheus.Counter + compactionRunJobFailed prometheus.Counter compactionRunInterval prometheus.Gauge compactorRunning prometheus.Gauge } @@ -69,22 +69,22 @@ func newMetrics(r prometheus.Registerer) *metrics { Name: "tenants_failed", Help: "Number of tenants failed processing during the current compaction run", }), - compactionRunUnownedJobs: promauto.With(r).NewCounter(prometheus.CounterOpts{ + compactionRunJobStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: metricsSubsystem, - Name: "jobs_unowned", - Help: "Number 
of unowned jobs skipped during the current compaction run", + Name: "job_started", + Help: "Number of jobs started processing during the current compaction run", }), - compactionRunSucceededJobs: promauto.With(r).NewCounter(prometheus.CounterOpts{ + compactionRunJobSuceeded: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: metricsSubsystem, - Name: "jobs_succeeded", + Name: "job_succeeded", Help: "Number of jobs successfully processed during the current compaction run", }), - compactionRunFailedJobs: promauto.With(r).NewCounter(prometheus.CounterOpts{ + compactionRunJobFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: metricsSubsystem, - Name: "jobs_failed", + Name: "job_failed", Help: "Number of jobs failed processing during the current compaction run", }), compactionRunInterval: promauto.With(r).NewGauge(prometheus.GaugeOpts{ diff --git a/pkg/bloomcompactor/sharding.go b/pkg/bloomcompactor/sharding.go index ecbfe06a4c17a..9b3009bd50652 100644 --- a/pkg/bloomcompactor/sharding.go +++ b/pkg/bloomcompactor/sharding.go @@ -14,7 +14,7 @@ var ( // ShardingStrategy describes whether compactor "owns" given user or job. type ShardingStrategy interface { util_ring.TenantSharding - OwnsJob(job Job) (bool, error) + OwnsFingerprint(tenantID string, fp uint64) (bool, error) } type ShuffleShardingStrategy struct { @@ -31,15 +31,15 @@ func NewShuffleShardingStrategy(r *ring.Ring, ringLifecycler *ring.BasicLifecycl return &s } -// OwnsJob makes sure only a single compactor should execute the job. -func (s *ShuffleShardingStrategy) OwnsJob(job Job) (bool, error) { - if !s.OwnsTenant(job.Tenant()) { +// OwnsFingerprint makes sure only a single compactor processes the fingerprint. +func (s *ShuffleShardingStrategy) OwnsFingerprint(tenantID string, fp uint64) (bool, error) { + if !s.OwnsTenant(tenantID) { return false, nil } - tenantRing := s.GetTenantSubRing(job.Tenant()) + tenantRing := s.GetTenantSubRing(tenantID) fpSharding := util_ring.NewFingerprintShuffleSharding(tenantRing, s.ringLifeCycler, RingOp) - return fpSharding.OwnsFingerprint(uint64(job.Fingerprint())) + return fpSharding.OwnsFingerprint(fp) } // NoopStrategy is an implementation of the ShardingStrategy that does not @@ -48,8 +48,8 @@ type NoopStrategy struct { util_ring.NoopStrategy } -// OwnsJob implements TenantShuffleSharding. -func (s *NoopStrategy) OwnsJob(_ Job) (bool, error) { +// OwnsFingerprint implements TenantShuffleSharding. 
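// (Editor's note: the no-op strategy claims ownership of every tenant and
// fingerprint, effectively disabling sharding; a single compactor instance then
// processes all series.)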
+func (s *NoopStrategy) OwnsFingerprint(_ string, _ uint64) (bool, error) { return true, nil } diff --git a/pkg/bloomcompactor/sharding_test.go b/pkg/bloomcompactor/sharding_test.go index 69ef14bb9d272..fc77536f6061f 100644 --- a/pkg/bloomcompactor/sharding_test.go +++ b/pkg/bloomcompactor/sharding_test.go @@ -90,13 +90,13 @@ func TestShuffleSharding(t *testing.T) { for j := 0; j < jobsPerTenant; j++ { lbls := labels.FromStrings("namespace", fmt.Sprintf("namespace-%d", j)) - job := NewJob(tenant, "", "", model.Fingerprint(lbls.Hash()), lbls, nil) - ownsJob, err := shard.OwnsJob(job) + fp := model.Fingerprint(lbls.Hash()) + ownsFingerprint, err := shard.OwnsFingerprint(tenant, uint64(fp)) require.NoError(t, err) var jobOwnedByOther int for _, other := range otherShards { - otherOwns, err := other.OwnsJob(job) + otherOwns, err := other.OwnsFingerprint(tenant, uint64(fp)) require.NoError(t, err) if otherOwns { jobOwnedByOther++ @@ -105,7 +105,7 @@ func TestShuffleSharding(t *testing.T) { // If this shard owns the job, no one else should own the job. // And if this shard doesn't own the job, only one of the other shards should own the job. - if ownsJob { + if ownsFingerprint { require.Equal(t, 0, jobOwnedByOther) ownedJobs++ } else { diff --git a/pkg/bloomcompactor/table_utils.go b/pkg/bloomcompactor/table_utils.go new file mode 100644 index 0000000000000..91940f4cfd455 --- /dev/null +++ b/pkg/bloomcompactor/table_utils.go @@ -0,0 +1,37 @@ +package bloomcompactor + +import ( + "sort" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/compactor/retention" + "github.com/grafana/loki/pkg/storage/config" +) + +func getIntervalsForTables(tables []string) map[string]model.Interval { + tablesIntervals := make(map[string]model.Interval, len(tables)) + for _, table := range tables { + tablesIntervals[table] = retention.ExtractIntervalFromTableName(table) + } + + return tablesIntervals +} + +func sortTablesByRange(tables []string, intervals map[string]model.Interval) { + sort.Slice(tables, func(i, j int) bool { + // less than if start time is after produces a most recent first sort order + return intervals[tables[i]].Start.After(intervals[tables[j]].Start) + }) +} + +// TODO: comes from pkg/compactor/compactor.go +func schemaPeriodForTable(cfg config.SchemaConfig, tableName string) (config.PeriodConfig, bool) { + tableInterval := retention.ExtractIntervalFromTableName(tableName) + schemaCfg, err := cfg.SchemaForTime(tableInterval.Start) + if err != nil || schemaCfg.IndexTables.TableFor(tableInterval.Start) != tableName { + return config.PeriodConfig{}, false + } + + return schemaCfg, true +} diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index 0f6d53dc17cd7..d7963daf50b43 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -74,6 +74,11 @@ const ( metricsSubsystem = "bloom_gateway" ) +var ( + // responsesPool pooling array of v1.Output [64, 128, 256, ..., 65536] + responsesPool = queue.NewSlicePool[v1.Output](1<<6, 1<<16, 2) +) + type metrics struct { queueDuration prometheus.Histogram inflightRequests prometheus.Summary @@ -158,6 +163,14 @@ type Gateway struct { workerConfig workerConfig } +type fixedQueueLimits struct { + maxConsumers int +} + +func (l *fixedQueueLimits) MaxConsumers(_ string, _ int) int { + return l.maxConsumers +} + // New returns a new instance of the Bloom Gateway. 
func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, overrides Limits, shardingStrategy ShardingStrategy, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) { g := &Gateway{ @@ -167,14 +180,15 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o sharding: shardingStrategy, pendingTasks: makePendingTasks(pendingTasksInitialCap), workerConfig: workerConfig{ - maxWaitTime: 200 * time.Millisecond, - maxItems: 100, + maxWaitTime: 200 * time.Millisecond, + maxItems: 100, + processBlocksSequentially: false, }, workerMetrics: newWorkerMetrics(reg, constants.Loki, metricsSubsystem), queueMetrics: queue.NewMetrics(reg, constants.Loki, metricsSubsystem), } - g.queue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, time.Minute, g.queueMetrics) + g.queue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, time.Minute, &fixedQueueLimits{100}, g.queueMetrics) g.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(g.queueMetrics.Cleanup) client, err := bloomshipper.NewBloomClient(schemaCfg.Configs, storageCfg, cm) @@ -290,14 +304,14 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk g.activeUsers.UpdateUserTimestamp(tenantID, time.Now()) level.Info(g.logger).Log("msg", "enqueue task", "task", task.ID) - g.queue.Enqueue(tenantID, []string{}, task, 100, func() { + g.queue.Enqueue(tenantID, []string{}, task, func() { // When enqueuing, we also add the task to the pending tasks g.pendingTasks.Add(task.ID, task) }) requestCount := len(req.Refs) - // TODO(chaudum): Use pool - responses := make([]v1.Output, 0, requestCount) + responses := responsesPool.Get(requestCount) + defer responsesPool.Put(responses) for { select { diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go index 697db210111d4..fd50a8c5fb2db 100644 --- a/pkg/bloomgateway/bloomgateway_test.go +++ b/pkg/bloomgateway/bloomgateway_test.go @@ -3,6 +3,7 @@ package bloomgateway import ( "context" "fmt" + "math/rand" "os" "testing" "time" @@ -254,74 +255,89 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { }) t.Run("use fuse queriers to filter chunks", func(t *testing.T) { - reg := prometheus.NewRegistry() - gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg) - require.NoError(t, err) - - now := mktime("2023-10-03 10:00") - - // replace store implementation and re-initialize workers and sub-services - bqs, data := createBlockQueriers(t, 5, now.Add(-8*time.Hour), now, 0, 1024) - gw.bloomStore = newMockBloomStore(bqs) - err = gw.initServices() - require.NoError(t, err) - - err = services.StartAndAwaitRunning(context.Background(), gw) - require.NoError(t, err) - t.Cleanup(func() { - err = services.StopAndAwaitTerminated(context.Background(), gw) - require.NoError(t, err) - }) - - chunkRefs := createQueryInputFromBlockData(t, tenantID, data, 100) - - t.Run("no match - return empty response", func(t *testing.T) { - inputChunkRefs := groupRefs(t, chunkRefs) - req := &logproto.FilterChunkRefRequest{ - From: now.Add(-8 * time.Hour), - Through: now, - Refs: inputChunkRefs, - Filters: []*logproto.LineFilterExpression{ - {Operator: 1, Match: "does not match"}, - }, - } - ctx := user.InjectOrgID(context.Background(), tenantID) - res, err := gw.FilterChunkRefs(ctx, req) - require.NoError(t, err) - - expectedResponse := &logproto.FilterChunkRefResponse{ - ChunkRefs: []*logproto.GroupedChunkRefs{}, - } - require.Equal(t, expectedResponse, res) - }) + for _, tc := range 
[]struct { + name string + value bool + }{ + {"sequentially", true}, + {"callback", false}, + } { + t.Run(tc.name, func(t *testing.T) { + + reg := prometheus.NewRegistry() + gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg) + require.NoError(t, err) + + now := mktime("2023-10-03 10:00") + + // replace store implementation and re-initialize workers and sub-services + bqs, data := createBlockQueriers(t, 5, now.Add(-8*time.Hour), now, 0, 1024) + gw.bloomStore = newMockBloomStore(bqs) + gw.workerConfig.processBlocksSequentially = tc.value + err = gw.initServices() + require.NoError(t, err) + + t.Log("process blocks in worker sequentially", gw.workerConfig.processBlocksSequentially) + + err = services.StartAndAwaitRunning(context.Background(), gw) + require.NoError(t, err) + t.Cleanup(func() { + err = services.StopAndAwaitTerminated(context.Background(), gw) + require.NoError(t, err) + }) - t.Run("match - return filtered", func(t *testing.T) { - inputChunkRefs := groupRefs(t, chunkRefs) - // hack to get indexed key for a specific series - // the indexed key range for a series is defined as - // i * keysPerSeries ... i * keysPerSeries + keysPerSeries - 1 - // where i is the nth series in a block - // fortunately, i is also used as Checksum for the single chunk of a series - // see mkBasicSeriesWithBlooms() in pkg/storage/bloom/v1/test_util.go - key := inputChunkRefs[0].Refs[0].Checksum*1000 + 500 + chunkRefs := createQueryInputFromBlockData(t, tenantID, data, 100) + + t.Run("no match - return empty response", func(t *testing.T) { + inputChunkRefs := groupRefs(t, chunkRefs) + req := &logproto.FilterChunkRefRequest{ + From: now.Add(-8 * time.Hour), + Through: now, + Refs: inputChunkRefs, + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: "does not match"}, + }, + } + ctx := user.InjectOrgID(context.Background(), tenantID) + res, err := gw.FilterChunkRefs(ctx, req) + require.NoError(t, err) + + expectedResponse := &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{}, + } + require.Equal(t, expectedResponse, res) + }) - req := &logproto.FilterChunkRefRequest{ - From: now.Add(-8 * time.Hour), - Through: now, - Refs: inputChunkRefs, - Filters: []*logproto.LineFilterExpression{ - {Operator: 1, Match: fmt.Sprint(key)}, - }, - } - ctx := user.InjectOrgID(context.Background(), tenantID) - res, err := gw.FilterChunkRefs(ctx, req) - require.NoError(t, err) + t.Run("match - return filtered", func(t *testing.T) { + inputChunkRefs := groupRefs(t, chunkRefs) + // hack to get indexed key for a specific series + // the indexed key range for a series is defined as + // i * keysPerSeries ... 
i * keysPerSeries + keysPerSeries - 1 + // where i is the nth series in a block + // fortunately, i is also used as Checksum for the single chunk of a series + // see mkBasicSeriesWithBlooms() in pkg/storage/bloom/v1/test_util.go + key := inputChunkRefs[0].Refs[0].Checksum*1000 + 500 + + req := &logproto.FilterChunkRefRequest{ + From: now.Add(-8 * time.Hour), + Through: now, + Refs: inputChunkRefs, + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: fmt.Sprint(key)}, + }, + } + ctx := user.InjectOrgID(context.Background(), tenantID) + res, err := gw.FilterChunkRefs(ctx, req) + require.NoError(t, err) + + expectedResponse := &logproto.FilterChunkRefResponse{ + ChunkRefs: inputChunkRefs[:1], + } + require.Equal(t, expectedResponse, res) + }) - expectedResponse := &logproto.FilterChunkRefResponse{ - ChunkRefs: inputChunkRefs[:1], - } - require.Equal(t, expectedResponse, res) - }) + }) + } }) } @@ -358,6 +374,8 @@ type mockBloomStore struct { bqs []bloomshipper.BlockQuerierWithFingerprintRange } +var _ bloomshipper.Store = &mockBloomStore{} + // GetBlockQueriersForBlockRefs implements bloomshipper.Store. func (s *mockBloomStore) GetBlockQueriersForBlockRefs(_ context.Context, _ string, _ []bloomshipper.BlockRef) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) { return s.bqs, nil @@ -385,6 +403,22 @@ func (s *mockBloomStore) GetBlockQueriers(_ context.Context, _ string, _, _ time func (s *mockBloomStore) Stop() {} +// ForEach implements bloomshipper.Store. +func (s *mockBloomStore) ForEach(_ context.Context, _ string, _ []bloomshipper.BlockRef, callback bloomshipper.ForEachBlockCallback) error { + shuffled := make([]bloomshipper.BlockQuerierWithFingerprintRange, len(s.bqs)) + _ = copy(shuffled, s.bqs) + + rand.Shuffle(len(shuffled), func(i, j int) { + shuffled[i], shuffled[j] = shuffled[j], shuffled[i] + }) + + for _, bq := range shuffled { + // ignore errors in the mock + _ = callback(bq.BlockQuerier, uint64(bq.MinFp), uint64(bq.MaxFp)) + } + return nil +} + func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBloom, nthSeries int) []*logproto.ChunkRef { t.Helper() n := 0 diff --git a/pkg/bloomgateway/cache.go b/pkg/bloomgateway/cache.go new file mode 100644 index 0000000000000..fe40b87e95488 --- /dev/null +++ b/pkg/bloomgateway/cache.go @@ -0,0 +1,217 @@ +package bloomgateway + +import ( + "context" + "flag" + "sort" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" + "google.golang.org/grpc" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" +) + +const ( + cacheParalellism = 1 +) + +type CacheConfig struct { + resultscache.Config `yaml:",inline"` +} + +// RegisterFlags registers flags. 
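The key generator above delegates to resultscache.ConstSplitter, which buckets a request's start time into fixed windows (the per-tenant BloomGatewayCacheKeyInterval) so that requests falling into the same window share a cache key. A rough standalone illustration of that bucketing, not the actual resultscache code:

```go
package main

import (
	"fmt"
	"time"
)

// constKey buckets a request start time into fixed-width intervals so that
// requests in the same window share a cache key. Illustrative only.
func constKey(tenant string, start time.Time, interval time.Duration) string {
	bucket := start.UnixMilli() / interval.Milliseconds()
	return fmt.Sprintf("%s:%d", tenant, bucket)
}

func main() {
	start := time.Date(2023, 10, 3, 10, 7, 0, 0, time.UTC)
	// Both calls land in the same 15-minute window, so the keys match.
	fmt.Println(constKey("fake", start, 15*time.Minute))
	fmt.Println(constKey("fake", start.Add(5*time.Minute), 15*time.Minute))
}
```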
+func (cfg *CacheConfig) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("bloom-gateway-client.cache.", f) +} + +func (cfg *CacheConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.Config.RegisterFlagsWithPrefix(f, prefix) +} + +type CacheLimits interface { + resultscache.Limits + BloomGatewayCacheKeyInterval(tenantID string) time.Duration +} + +type keyGen struct { + CacheLimits +} + +func newCacheKeyGen(limits CacheLimits) keyGen { + return keyGen{limits} +} + +func (k keyGen) GenerateCacheKey(ctx context.Context, tenant string, r resultscache.Request) string { + return resultscache.ConstSplitter(k.BloomGatewayCacheKeyInterval(tenant)).GenerateCacheKey(ctx, tenant, r) +} + +type extractor struct{} + +func newExtractor() extractor { + return extractor{} +} + +// Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds. +// We remove chunks that are not within the given time range. +func (e extractor) Extract(start, end int64, r resultscache.Response, _, _ int64) resultscache.Response { + res := r.(*logproto.FilterChunkRefResponse) + + chunkRefs := make([]*logproto.GroupedChunkRefs, 0, len(res.ChunkRefs)) + for _, chunkRef := range res.ChunkRefs { + refs := make([]*logproto.ShortRef, 0, len(chunkRef.Refs)) + for _, ref := range chunkRef.Refs { + if model.Time(end) < ref.From || ref.Through <= model.Time(start) { + continue + } + refs = append(refs, ref) + } + if len(refs) > 0 { + chunkRefs = append(chunkRefs, &logproto.GroupedChunkRefs{ + Fingerprint: chunkRef.Fingerprint, + Tenant: chunkRef.Tenant, + Refs: refs, + }) + } + } + + return &logproto.FilterChunkRefResponse{ + ChunkRefs: chunkRefs, + } +} + +type merger struct{} + +func newMerger() merger { + return merger{} +} + +// MergeResponse merges responses from multiple requests into a single Response +// We merge all chunks grouped by their fingerprint. +func (m merger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) { + var size int + for _, r := range responses { + res := r.(*logproto.FilterChunkRefResponse) + size += len(res.ChunkRefs) + } + + chunkRefs := make([]*logproto.GroupedChunkRefs, 0, size) + for _, r := range responses { + res := r.(*logproto.FilterChunkRefResponse) + chunkRefs = append(chunkRefs, res.ChunkRefs...) + } + + return &logproto.FilterChunkRefResponse{ + ChunkRefs: mergeGroupedChunkRefs(chunkRefs), + }, nil +} + +// Merge duplicated fingerprints by: +// 1. Sort the chunkRefs by their stream fingerprint +// 2. Remove duplicated FPs appending all chunks into the first fingerprint's chunk list. +func mergeGroupedChunkRefs(chunkRefs []*logproto.GroupedChunkRefs) []*logproto.GroupedChunkRefs { + if len(chunkRefs) <= 1 { + return chunkRefs + } + + sort.Slice(chunkRefs, func(i, j int) bool { + return chunkRefs[i].Fingerprint < chunkRefs[j].Fingerprint + }) + + var lastDiffFP int + for i := 1; i < len(chunkRefs); i++ { + if chunkRefs[lastDiffFP].Fingerprint == chunkRefs[i].Fingerprint { + chunkRefs[lastDiffFP].Refs = mergeShortRefs(append(chunkRefs[lastDiffFP].Refs, chunkRefs[i].Refs...)) + } else { + lastDiffFP++ + chunkRefs[lastDiffFP] = chunkRefs[i] + } + } + return chunkRefs[:lastDiffFP+1] +} + +// mergeShortRefs merges short-refs by removing duplicated checksums. 
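mergeGroupedChunkRefs above sorts by fingerprint and then compacts the slice in place, folding the chunks of every duplicate fingerprint into its first occurrence. The same sort-then-fold shape on a simplified type, as a sketch (the mergeShortRefs helper that follows applies the same idea to checksums):

```go
package main

import (
	"fmt"
	"sort"
)

type group struct {
	fp   uint64
	refs []int
}

// mergeGroups sorts by fingerprint and merges refs of duplicates in place,
// mirroring the shape of mergeGroupedChunkRefs in the PR.
func mergeGroups(gs []group) []group {
	if len(gs) <= 1 {
		return gs
	}
	sort.Slice(gs, func(i, j int) bool { return gs[i].fp < gs[j].fp })
	last := 0
	for i := 1; i < len(gs); i++ {
		if gs[last].fp == gs[i].fp {
			gs[last].refs = append(gs[last].refs, gs[i].refs...)
		} else {
			last++
			gs[last] = gs[i]
		}
	}
	return gs[:last+1]
}

func main() {
	gs := []group{{2, []int{30}}, {1, []int{10}}, {2, []int{40}}}
	fmt.Println(mergeGroups(gs)) // [{1 [10]} {2 [30 40]}]
}
```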
+func mergeShortRefs(refs []*logproto.ShortRef) []*logproto.ShortRef { + if len(refs) <= 1 { + return refs + } + + sort.Slice(refs, func(i, j int) bool { + return refs[i].Checksum < refs[j].Checksum + }) + return slices.CompactFunc(refs, func(a, b *logproto.ShortRef) bool { + return a.Checksum == b.Checksum + }) +} + +type ClientCache struct { + cache *resultscache.ResultsCache + limits CacheLimits + logger log.Logger +} + +func NewBloomGatewayClientCacheMiddleware( + logger log.Logger, + next logproto.BloomGatewayClient, + c cache.Cache, + limits CacheLimits, + cacheGen resultscache.CacheGenNumberLoader, + retentionEnabled bool, +) *ClientCache { + nextAsHandler := resultscache.HandlerFunc(func(ctx context.Context, cacheReq resultscache.Request) (resultscache.Response, error) { + req := cacheReq.(requestWithGrpcCallOptions) + return next.FilterChunkRefs(ctx, req.FilterChunkRefRequest, req.grpcCallOptions...) + }) + + resultsCache := resultscache.NewResultsCache( + logger, + c, + nextAsHandler, + newCacheKeyGen(limits), + limits, + newMerger(), + newExtractor(), + nil, + nil, + func(_ context.Context, _ []string, _ resultscache.Request) int { + return cacheParalellism + }, + cacheGen, + retentionEnabled, + ) + + return &ClientCache{ + cache: resultsCache, + limits: limits, + logger: logger, + } +} + +func (c *ClientCache) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest, opts ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) { + cacheReq := requestWithGrpcCallOptions{ + FilterChunkRefRequest: req, + grpcCallOptions: opts, + } + res, err := c.cache.Do(ctx, cacheReq) + if err != nil { + return nil, err + } + + return res.(*logproto.FilterChunkRefResponse), nil +} + +type requestWithGrpcCallOptions struct { + *logproto.FilterChunkRefRequest + grpcCallOptions []grpc.CallOption +} + +func (r requestWithGrpcCallOptions) WithStartEndForCache(start time.Time, end time.Time) resultscache.Request { + return requestWithGrpcCallOptions{ + FilterChunkRefRequest: r.FilterChunkRefRequest.WithStartEndForCache(start, end).(*logproto.FilterChunkRefRequest), + grpcCallOptions: r.grpcCallOptions, + } +} diff --git a/pkg/bloomgateway/cache_test.go b/pkg/bloomgateway/cache_test.go new file mode 100644 index 0000000000000..5a66162000a46 --- /dev/null +++ b/pkg/bloomgateway/cache_test.go @@ -0,0 +1,494 @@ +package bloomgateway + +import ( + "context" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/user" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" + "github.com/grafana/loki/pkg/util/constants" +) + +// Range is 1000-4000 +var templateResponse = &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 1, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + { + Fingerprint: 2, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 3000, + Through: 4000, + Checksum: 30, + }, + { + From: 1000, + Through: 3000, + Checksum: 40, + }, + }, + }, + }, +} + +func TestExtract(t *testing.T) { + for _, tc := range []struct { + name string + start int64 + end int64 + input *logproto.FilterChunkRefResponse + expected 
*logproto.FilterChunkRefResponse + }{ + { + name: "start and end out of range", + start: 100, + end: 200, + input: templateResponse, + expected: &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{}, + }, + }, + { + name: "start spans exact range", + start: 1000, + end: 4000, + input: templateResponse, + expected: templateResponse, + }, + { + name: "start spans more than range", + start: 100, + end: 5000, + input: templateResponse, + expected: templateResponse, + }, + { + name: "start and end within range", + start: 1700, + end: 2700, + input: templateResponse, + expected: &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 1, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + { + Fingerprint: 2, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 3000, + Checksum: 40, + }, + }, + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + e := newExtractor() + actual := e.Extract(tc.start, tc.end, tc.input, 0, 0) + require.Equal(t, tc.expected, actual) + }) + } +} + +func TestMerge(t *testing.T) { + for _, tc := range []struct { + name string + input []*logproto.FilterChunkRefResponse + expected *logproto.FilterChunkRefResponse + }{ + { + name: "empy input", + input: []*logproto.FilterChunkRefResponse{}, + expected: &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{}, + }, + }, + { + name: "single input", + input: []*logproto.FilterChunkRefResponse{templateResponse}, + expected: templateResponse, + }, + { + name: "repeating and non-repeating fingerprint with repeating and non-repeating chunks", + input: []*logproto.FilterChunkRefResponse{ + { + ChunkRefs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 1, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + { + Fingerprint: 2, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + }, + }, + { + ChunkRefs: []*logproto.GroupedChunkRefs{ + // Same FP as in previous input and same chunks + { + Fingerprint: 1, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + // Same FP as in previous input, but different chunks + { + Fingerprint: 2, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + // Same chunk as in previous input + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + // New chunk + { + From: 2000, + Through: 2500, + Checksum: 30, + }, + }, + }, + // New FP + { + Fingerprint: 3, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + }, + }, + { + ChunkRefs: []*logproto.GroupedChunkRefs{ + // Same FP as in previous input and diff chunks + { + Fingerprint: 2, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 700, + Through: 1000, + Checksum: 40, + }, + { + From: 2000, + Through: 2700, + Checksum: 50, + }, + }, + }, + }, + }, + }, + expected: &logproto.FilterChunkRefResponse{ + ChunkRefs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 1, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + 
Checksum: 20, + }, + }, + }, + { + Fingerprint: 2, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + { + From: 2000, + Through: 2500, + Checksum: 30, + }, + { + From: 700, + Through: 1000, + Checksum: 40, + }, + { + From: 2000, + Through: 2700, + Checksum: 50, + }, + }, + }, + { + Fingerprint: 3, + Tenant: "fake", + Refs: []*logproto.ShortRef{ + { + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + From: 1500, + Through: 2500, + Checksum: 20, + }, + }, + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + input := make([]resultscache.Response, 0, len(tc.input)) + for _, i := range tc.input { + input = append(input, i) + } + + m := newMerger() + actual, err := m.MergeResponse(input...) + require.NoError(t, err) + require.Equal(t, tc.expected, actual) + }) + } +} + +func TestCache(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "fake") + + limits := mockLimits{ + cacheInterval: 15 * time.Minute, + } + + cfg := CacheConfig{ + Config: resultscache.Config{ + CacheConfig: cache.Config{ + Cache: cache.NewMockCache(), + }, + }, + } + c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.BloomFilterCache, constants.Loki) + require.NoError(t, err) + defer c.Stop() + + chunkRefs := []*logproto.ChunkRef{ + { + Fingerprint: 2, + UserID: "fake", + From: 1500, + Through: 2500, + Checksum: 30, + }, + { + Fingerprint: 3, + UserID: "fake", + From: 2500, + Through: 3500, + }, + } + req := &logproto.FilterChunkRefRequest{ + From: model.Time(2000), + Through: model.Time(3000), + Refs: groupRefs(t, chunkRefs), + Filters: []*logproto.LineFilterExpression{ + {Operator: 1, Match: "foo"}, + }, + } + expectedRes := &logproto.FilterChunkRefResponse{ + ChunkRefs: groupRefs(t, chunkRefs), + } + + server, calls := newMockServer(expectedRes) + + cacheMiddleware := NewBloomGatewayClientCacheMiddleware( + log.NewNopLogger(), + server, + c, + limits, + nil, + false, + ) + + // First call should go to the server + *calls = 0 + res, err := cacheMiddleware.FilterChunkRefs(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, *calls) + require.Equal(t, expectedRes, res) + + // Second call should go to the cache + *calls = 0 + res, err = cacheMiddleware.FilterChunkRefs(ctx, req) + require.NoError(t, err) + require.Equal(t, 0, *calls) + require.Equal(t, expectedRes, res) + + // Doing a request with new start and end should: + // 1. hit the server the leading time + // 2. hit the cache the cached span + // 3. 
hit the server for the trailing time + newChunkRefs := []*logproto.ChunkRef{ + { + Fingerprint: 1, + UserID: "fake", + From: 1000, + Through: 1500, + Checksum: 10, + }, + { + Fingerprint: 4, + UserID: "fake", + From: 3500, + Through: 4500, + }, + } + server.SetResponse(&logproto.FilterChunkRefResponse{ + ChunkRefs: groupRefs(t, newChunkRefs), + }) + expectedRes = &logproto.FilterChunkRefResponse{ + ChunkRefs: groupRefs(t, append(chunkRefs, newChunkRefs...)), + } + req.From = model.Time(100) + req.Through = model.Time(5000) + *calls = 0 + res, err = cacheMiddleware.FilterChunkRefs(ctx, req) + require.NoError(t, err) + require.Equal(t, 2, *calls) + require.Equal(t, expectedRes, res) + + // Doing a request again should only hit the cache + *calls = 0 + res, err = cacheMiddleware.FilterChunkRefs(ctx, req) + require.NoError(t, err) + require.Equal(t, 0, *calls) + require.Equal(t, expectedRes, res) +} + +type mockServer struct { + calls *int + res *logproto.FilterChunkRefResponse +} + +func newMockServer(res *logproto.FilterChunkRefResponse) (*mockServer, *int) { + var calls int + return &mockServer{ + calls: &calls, + res: res, + }, &calls +} + +func (s *mockServer) SetResponse(res *logproto.FilterChunkRefResponse) { + s.res = res +} + +func (s *mockServer) FilterChunkRefs(_ context.Context, _ *logproto.FilterChunkRefRequest, _ ...grpc.CallOption) (*logproto.FilterChunkRefResponse, error) { + *s.calls++ + return s.res, nil +} + +type mockLimits struct { + cacheFreshness time.Duration + cacheInterval time.Duration +} + +func (m mockLimits) MaxCacheFreshness(_ context.Context, _ string) time.Duration { + return m.cacheFreshness +} + +func (m mockLimits) BloomGatewayCacheKeyInterval(_ string) time.Duration { + return m.cacheInterval +} diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go index 9e43a32d08e76..cfbb6c60284ec 100644 --- a/pkg/bloomgateway/client.go +++ b/pkg/bloomgateway/client.go @@ -7,6 +7,8 @@ import ( "io" "math" "math/rand" + "sort" + "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -23,10 +25,42 @@ import ( "github.com/grafana/loki/pkg/distributor/clientpool" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/util" + "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/queue" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" + "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util/constants" ) +var ( + // groupedChunksRefPool pooling slice of logproto.GroupedChunkRefs [64, 128, 256, ..., 65536] + groupedChunksRefPool = queue.NewSlicePool[*logproto.GroupedChunkRefs](1<<6, 1<<16, 2) + // ringGetBuffersPool pooling for ringGetBuffers to avoid calling ring.MakeBuffersForGet() for each request + ringGetBuffersPool = sync.Pool{ + New: func() interface{} { + descs, hosts, zones := ring.MakeBuffersForGet() + return &ringGetBuffers{ + Descs: descs, + Hosts: hosts, + Zones: zones, + } + }, + } +) + +type ringGetBuffers struct { + Descs []ring.InstanceDesc + Hosts []string + Zones []string +} + +func (buf *ringGetBuffers) Reset() { + buf.Descs = buf.Descs[:0] + buf.Hosts = buf.Hosts[:0] + buf.Zones = buf.Zones[:0] +} + // GRPCPool represents a pool of gRPC connections to different bloom gateway instances. // Interfaces are inlined for simplicity to automatically satisfy interface functions. 
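The cache test above widens the request from [2000, 3000] to [100, 5000] and expects exactly two upstream calls: one for the leading gap and one for the trailing gap around the cached span. A toy sketch of that gap computation with illustrative types (the real splitting lives inside resultscache):

```go
package main

import "fmt"

type span struct{ from, through int64 }

// gaps returns the sub-ranges of req not covered by the cached extent,
// i.e. the leading and trailing pieces the server must still answer.
func gaps(req, cached span) []span {
	var out []span
	if req.from < cached.from {
		out = append(out, span{req.from, cached.from})
	}
	if req.through > cached.through {
		out = append(out, span{cached.through, req.through})
	}
	return out
}

func main() {
	fmt.Println(gaps(span{100, 5000}, span{2000, 3000})) // [{100 2000} {3000 5000}]
}
```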
type GRPCPool struct { @@ -68,6 +102,10 @@ type ClientConfig struct { // Ring is the Bloom Gateway ring used to find the appropriate Bloom Gateway instance // this client should talk to. Ring ring.ReadRing `yaml:"-"` + + // Cache configures the cache used to store the results of the Bloom Gateway server. + Cache CacheConfig `yaml:"results_cache,omitempty"` + CacheResults bool `yaml:"cache_results"` } // RegisterFlags registers flags for the Bloom Gateway client configuration. @@ -78,9 +116,25 @@ func (i *ClientConfig) RegisterFlags(f *flag.FlagSet) { // RegisterFlagsWithPrefix registers flags for the Bloom Gateway client configuration with a common prefix. func (i *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { i.GRPCClientConfig.RegisterFlagsWithPrefix(prefix+"grpc", f) + i.Cache.RegisterFlagsWithPrefix(prefix+"cache.", f) + f.BoolVar(&i.CacheResults, prefix+"cache_results", false, "Flag to control whether to cache bloom gateway client requests/responses.") f.BoolVar(&i.LogGatewayRequests, prefix+"log-gateway-requests", false, "Flag to control whether requests sent to the gateway should be logged or not.") } +func (i *ClientConfig) Validate() error { + if err := i.GRPCClientConfig.Validate(); err != nil { + return errors.Wrap(err, "grpc client config") + } + + if i.CacheResults { + if err := i.Cache.Validate(); err != nil { + return errors.Wrap(err, "cache config") + } + } + + return nil +} + type Client interface { FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, filters ...*logproto.LineFilterExpression) ([]*logproto.GroupedChunkRefs, error) } @@ -93,7 +147,15 @@ type GatewayClient struct { ring ring.ReadRing } -func NewGatewayClient(cfg ClientConfig, limits Limits, registerer prometheus.Registerer, logger log.Logger, metricsNamespace string) (*GatewayClient, error) { +func NewGatewayClient( + cfg ClientConfig, + limits Limits, + registerer prometheus.Registerer, + logger log.Logger, + metricsNamespace string, + cacheGen resultscache.CacheGenNumberLoader, + retentionEnabled bool, +) (*GatewayClient, error) { latency := promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ Namespace: constants.Loki, Subsystem: "bloom_gateway", @@ -107,22 +169,43 @@ func NewGatewayClient(cfg ClientConfig, limits Limits, registerer prometheus.Reg return nil, err } + var c cache.Cache + if cfg.CacheResults { + c, err = cache.New(cfg.Cache.CacheConfig, registerer, logger, stats.BloomFilterCache, constants.Loki) + if err != nil { + return nil, errors.Wrap(err, "new bloom gateway cache") + } + if cfg.Cache.Compression == "snappy" { + c = cache.NewSnappy(c, logger) + } + } + poolFactory := func(addr string) (ringclient.PoolClient, error) { pool, err := NewBloomGatewayGRPCPool(addr, dialOpts) if err != nil { return nil, errors.Wrap(err, "new bloom gateway grpc pool") } + + if cfg.CacheResults { + pool.BloomGatewayClient = NewBloomGatewayClientCacheMiddleware( + logger, + pool.BloomGatewayClient, + c, + limits, + cacheGen, + retentionEnabled, + ) + } + return pool, nil } - c := &GatewayClient{ + return &GatewayClient{ cfg: cfg, logger: logger, limits: limits, pool: clientpool.NewPool("bloom-gateway", cfg.PoolConfig, cfg.Ring, ringclient.PoolAddrFunc(poolFactory), logger, metricsNamespace), - } - - return c, nil + }, nil } func shuffleAddrs(addrs []string) []string { @@ -138,27 +221,28 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, t return groups, nil } - // Get the addresses of 
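When CacheResults is enabled, the pool factory above wraps each gRPC client in the caching middleware before handing it to the client pool. A minimal sketch of that decorator wiring with toy types (the real middleware is NewBloomGatewayClientCacheMiddleware; everything here is illustrative):

```go
package main

import "fmt"

type client interface{ call(string) string }

type upstream struct{}

func (upstream) call(q string) string { return "result(" + q + ")" }

// cached decorates a client with a trivial memo, standing in for the
// results-cache middleware installed by the pool factory.
type cached struct {
	next client
	memo map[string]string
}

func (c *cached) call(q string) string {
	if r, ok := c.memo[q]; ok {
		return r + " [cached]"
	}
	r := c.next.call(q)
	c.memo[q] = r
	return r
}

func main() {
	var c client = upstream{}
	cacheResults := true
	if cacheResults { // mirrors the poolFactory branch
		c = &cached{next: c, memo: map[string]string{}}
	}
	fmt.Println(c.call("q1"))
	fmt.Println(c.call("q1"))
}
```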
corresponding bloom gateways for each series. - fingerprints, addrs, err := c.serverAddrsForFingerprints(tenant, groups) + subRing := GetShuffleShardingSubring(c.ring, tenant, c.limits) + rs, err := subRing.GetAllHealthy(BlocksRead) if err != nil { - return nil, err + return nil, errors.Wrap(err, "bloom gateway get healthy instances") } - // Group chunk refs by addresses of one or more bloom gateways. - // All chunk refs of series that belong to one and the same bloom gateway are set in one batch. - streamsByAddr := c.groupStreamsByAddr(groups, addrs) + streamsByInst, err := c.groupFingerprintsByServer(groups, subRing, rs.Instances) + if err != nil { + return nil, err + } - // TODO(chaudum): We might over-allocate for the filtered responses here? - filteredChunkRefs := make([]*logproto.GroupedChunkRefs, 0, len(fingerprints)) + filteredChunkRefs := groupedChunksRefPool.Get(len(groups)) + defer groupedChunksRefPool.Put(filteredChunkRefs) - for _, item := range streamsByAddr { + for _, item := range streamsByInst { // randomize order of addresses so we don't hotspot the first server in the list - addrs := shuffleAddrs(item.addrs) + addrs := shuffleAddrs(item.instance.addrs) err := c.doForAddrs(addrs, func(client logproto.BloomGatewayClient) error { req := &logproto.FilterChunkRefRequest{ From: from, Through: through, - Refs: item.refs, + Refs: item.fingerprints, Filters: filters, } resp, err := client.FilterChunkRefs(ctx, req) @@ -175,53 +259,6 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, t return filteredChunkRefs, nil } -// isEqualStringElements checks if two string slices contain the same elements. -// The order of the elements is ignored. -func isEqualStringElements(a, b []string) bool { - if len(a) != len(b) { - return false - } - for _, s := range a { - if !util.StringsContain(b, s) { - return false - } - } - return true -} - -// listContainsAddrs checks if a slice of chunkRefAddrs contains an element -// whos field addrs contains the same addresses as the given slice of -// addresses. -// It returns the index of the element, if found, and a boolean whether the -// given list contains the given addrs. -func listContainsAddrs(list []chunkRefsByAddrs, addrs []string) (int, bool) { - for i, r := range list { - if isEqualStringElements(r.addrs, addrs) { - return i, true - } - } - return -1, false -} - -type chunkRefsByAddrs struct { - addrs []string - refs []*logproto.GroupedChunkRefs -} - -func (c *GatewayClient) groupStreamsByAddr(groups []*logproto.GroupedChunkRefs, addresses [][]string) []chunkRefsByAddrs { - res := make([]chunkRefsByAddrs, 0, len(addresses)) - for i := 0; i < len(addresses); i++ { - addrs := addresses[i] - refs := groups[i] - if idx, ok := listContainsAddrs(res, addrs); ok { - res[idx].refs = append(res[idx].refs, refs) - } else { - res = append(res, chunkRefsByAddrs{addrs: addrs, refs: []*logproto.GroupedChunkRefs{refs}}) - } - } - return res -} - // doForAddrs sequetially calls the provided callback function fn for each // address in given slice addrs until the callback function does not return an // error. @@ -245,47 +282,180 @@ func (c *GatewayClient) doForAddrs(addrs []string, fn func(logproto.BloomGateway return err } -// serverAddrsForFingerprints returns a slices of server address slices for -// each fingerprint of given fingerprints. -// The indexes of the returned slices correspond to each other. -// Returns an error in case the bloom gateway ring could not get the -// corresponding replica set for a given fingerprint. 
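doForAddrs, kept from the previous version, tries each replica address in turn and returns nil on the first success, so a request fails only if every replica fails. The pattern in isolation, as a simplified stand-in for the real method that operates on gRPC clients:

```go
package main

import (
	"errors"
	"fmt"
)

// doForAddrs calls fn for each address until one call succeeds,
// returning the last error if all attempts fail.
func doForAddrs(addrs []string, fn func(addr string) error) error {
	var err error
	for _, addr := range addrs {
		if err = fn(addr); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	err := doForAddrs([]string{"10.0.0.1", "10.0.0.2"}, func(addr string) error {
		if addr == "10.0.0.1" {
			return errors.New("connection refused")
		}
		fmt.Println("served by", addr)
		return nil
	})
	fmt.Println("err:", err)
}
```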
-// Warning: This function becomes inefficient when the number of fingerprints is very large.
-func (c *GatewayClient) serverAddrsForFingerprints(tenantID string, groups []*logproto.GroupedChunkRefs) ([]uint64, [][]string, error) {
-	subRing := GetShuffleShardingSubring(c.ring, tenantID, c.limits)
+func (c *GatewayClient) groupFingerprintsByServer(groups []*logproto.GroupedChunkRefs, subRing ring.ReadRing, instances []ring.InstanceDesc) ([]instanceWithFingerprints, error) {
+	bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet()
 
-	rs, err := subRing.GetAllHealthy(BlocksRead)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "bloom gateway get healthy instances")
+	servers := make([]addrsWithTokenRange, 0, len(instances))
+	prev := -1
+	it := newInstanceSortMergeIterator(instances)
+	for it.Next() {
+		// We can use one of the tokens from the token range
+		// to obtain all addresses for that token.
+		rs, err := subRing.Get(it.At().token, BlocksRead, bufDescs, bufHosts, bufZones)
+		if err != nil {
+			return nil, errors.Wrap(err, "bloom gateway get ring")
+		}
+		servers = append(servers, addrsWithTokenRange{
+			minToken: uint32(prev + 1),
+			maxToken: it.At().token,
+			id:       it.At().instance.Id,
+			addrs:    rs.GetAddresses(),
+		})
+		prev = int(it.At().token)
 	}
 
-	var numTokens int
-	for _, instanceDesc := range rs.Instances {
-		numTokens += len(instanceDesc.Tokens)
+	if len(servers) > 0 {
+		// append the instance for the token range between the greatest token and MaxUint32
+		servers = append(servers, addrsWithTokenRange{
+			minToken: uint32(prev),
+			maxToken: math.MaxUint32,
+			addrs:    servers[0].addrs,
+			id:       servers[0].id,
+		})
 	}
 
-	numFingerprints := len(groups)
-	if numFingerprints > int(float64(numTokens)*math.Log2(float64(numFingerprints))) {
-		// TODO(chaudum): Implement algorithm in O(n * m * log(k) + n) instead of O(k) by iterating over ring tokens
-		// and finding corresponding fingerprint ranges using binary search.
-		// n .. number of instances
-		// m .. number of tokens per instance
-		// k .. 
number of fingerprints - level.Warn(c.logger).Log("msg", "using an inefficient algorithm to determin server addresses for fingerprints", "fingerprints", numFingerprints, "tokens", numTokens) + boundedFingerprints := partitionFingerprintsByAddresses(groups, servers) + return groupByInstance(boundedFingerprints), nil +} + +type instanceWithToken struct { + instance ring.InstanceDesc + token uint32 +} + +type addrsWithTokenRange struct { + id string + addrs []string + minToken, maxToken uint32 +} + +func (s addrsWithTokenRange) cmp(token uint32) v1.BoundsCheck { + if token < s.minToken { + return v1.Before + } else if token > s.maxToken { + return v1.After } + return v1.Overlap +} - fingerprints := make([]uint64, numFingerprints) - addresses := make([][]string, numFingerprints) - bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet() +type instanceWithFingerprints struct { + instance addrsWithTokenRange + fingerprints []*logproto.GroupedChunkRefs +} - for idx, key := range groups { - rs, err = subRing.Get(uint32(key.Fingerprint), BlocksRead, bufDescs, bufHosts, bufZones) - if err != nil { - return nil, nil, errors.Wrap(err, "bloom gateway get ring") +func partitionFingerprintsByAddresses(fingerprints []*logproto.GroupedChunkRefs, addresses []addrsWithTokenRange) (result []instanceWithFingerprints) { + for _, instance := range addresses { + + min := sort.Search(len(fingerprints), func(i int) bool { + return instance.cmp(uint32(fingerprints[i].Fingerprint)) > v1.Before + }) + + max := sort.Search(len(fingerprints), func(i int) bool { + return instance.cmp(uint32(fingerprints[i].Fingerprint)) == v1.After + }) + + // fingerprint is out of boundaries + if min == len(fingerprints) || max == 0 { + continue + } + + result = append(result, instanceWithFingerprints{instance: instance, fingerprints: fingerprints[min:max]}) + } + + return result +} + +// groupByInstance groups fingerprints by server instance +func groupByInstance(boundedFingerprints []instanceWithFingerprints) []instanceWithFingerprints { + if len(boundedFingerprints) == 0 { + return []instanceWithFingerprints{} + } + + result := make([]instanceWithFingerprints, 0, len(boundedFingerprints)) + pos := make(map[string]int, len(boundedFingerprints)) + + for _, cur := range boundedFingerprints { + if len(cur.fingerprints) == 0 { + continue } - fingerprints[idx] = key.Fingerprint - addresses[idx] = rs.GetAddresses() + // Copy fingerprint slice, otherwise we mutate the original + // TODO(chaudum): Use SlicePool + tmp := make([]*logproto.GroupedChunkRefs, len(cur.fingerprints)) + _ = copy(tmp, cur.fingerprints) + + idx, ok := pos[cur.instance.id] + if ok { + result[idx].fingerprints = append(result[idx].fingerprints, tmp...) + continue + } + + pos[cur.instance.id] = len(result) + result = append(result, instanceWithFingerprints{ + instance: addrsWithTokenRange{ + id: cur.instance.id, + addrs: cur.instance.addrs, + }, + fingerprints: tmp, + }) + } + + return result +} + +// newInstanceSortMergeIterator creates an iterator that yields instanceWithToken elements +// where the token of the elements are sorted in ascending order. 
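Taken together, the new client-side routing has two steps: build contiguous token ranges from the merged instance tokens (wrapping the tail range to MaxUint32), then binary-search the sorted fingerprints into each range. A condensed, self-contained sketch of both steps with illustrative types; the sort-merge iterator defined next supplies the ordered token stream this construction consumes:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

type tokenRange struct {
	id       string
	min, max uint32
}

// buildRanges assigns [prev+1, token] to each token's owner; the final range
// wraps from the greatest token to MaxUint32 and reuses the first owner,
// mirroring groupFingerprintsByServer.
func buildRanges(tokens []uint32, owners []string) []tokenRange {
	out := make([]tokenRange, 0, len(tokens)+1)
	prev := -1
	for i, tok := range tokens {
		out = append(out, tokenRange{owners[i], uint32(prev + 1), tok})
		prev = int(tok)
	}
	if len(out) > 0 {
		out = append(out, tokenRange{out[0].id, uint32(prev), math.MaxUint32})
	}
	return out
}

// within returns the sub-slice of sorted fps inside [r.min, r.max], found with
// two binary searches as in partitionFingerprintsByAddresses.
func within(fps []uint32, r tokenRange) []uint32 {
	lo := sort.Search(len(fps), func(i int) bool { return fps[i] >= r.min })
	hi := sort.Search(len(fps), func(i int) bool { return fps[i] > r.max })
	if lo == len(fps) || hi == 0 {
		return nil
	}
	return fps[lo:hi]
}

func main() {
	fps := []uint32{0, 100, 101, 200, 201, 300}
	for _, r := range buildRanges([]uint32{100, 200, 300}, []string{"a", "b", "c"}) {
		fmt.Printf("%s [%d,%d] -> %v\n", r.id, r.min, r.max, within(fps, r))
	}
}
```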
+func newInstanceSortMergeIterator(instances []ring.InstanceDesc) v1.Iterator[instanceWithToken] { + it := &sortMergeIterator[ring.InstanceDesc, uint32, instanceWithToken]{ + items: instances, + transform: func(item ring.InstanceDesc, val uint32) instanceWithToken { + return instanceWithToken{instance: item, token: val} + }, + } + sequences := make([]v1.PeekingIterator[IndexedValue[uint32]], 0, len(instances)) + for i := range instances { + sort.Slice(instances[i].Tokens, func(a, b int) bool { + return instances[i].Tokens[a] < instances[i].Tokens[b] + }) + iter := NewIterWithIndex[uint32](v1.NewSliceIter(instances[i].Tokens), i) + sequences = append(sequences, v1.NewPeekingIter[IndexedValue[uint32]](iter)) + } + it.heap = v1.NewHeapIterator( + func(i, j IndexedValue[uint32]) bool { + return i.val < j.val + }, + sequences..., + ) + it.err = nil + + return it +} + +// sortMergeIterator implements v1.Iterator +type sortMergeIterator[T any, C comparable, R any] struct { + curr R + heap *v1.HeapIterator[IndexedValue[C]] + items []T + transform func(T, C) R + err error +} + +func (it *sortMergeIterator[T, C, R]) Next() bool { + ok := it.heap.Next() + if !ok { + it.err = io.EOF + return false } - return fingerprints, addresses, nil + group := it.heap.At() + it.curr = it.transform(it.items[group.idx], group.val) + + return true +} + +func (it *sortMergeIterator[T, C, R]) At() R { + return it.curr +} + +func (it *sortMergeIterator[T, C, R]) Err() error { + return it.err } diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go index 670c050517163..f0d5b2edf5c07 100644 --- a/pkg/bloomgateway/client_test.go +++ b/pkg/bloomgateway/client_test.go @@ -1,10 +1,13 @@ package bloomgateway import ( + "sort" "testing" + "time" "github.com/go-kit/log" "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/ring" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -24,12 +27,163 @@ func TestBloomGatewayClient(t *testing.T) { flagext.DefaultValues(&cfg) t.Run("", func(t *testing.T) { - _, err := NewGatewayClient(cfg, l, reg, logger, "loki") + _, err := NewGatewayClient(cfg, l, reg, logger, "loki", nil, false) require.NoError(t, err) }) } -func TestBloomGatewayClient_GroupStreamsByAddresses(t *testing.T) { +func TestBloomGatewayClient_SortInstancesByToken(t *testing.T) { + input := []ring.InstanceDesc{ + {Id: "1", Tokens: []uint32{6, 5, 2, 9}}, + {Id: "2", Tokens: []uint32{3, 4, 7}}, + {Id: "3", Tokens: []uint32{1, 8, 0}}, + } + expected := []instanceWithToken{ + {instance: input[2], token: 0}, + {instance: input[2], token: 1}, + {instance: input[0], token: 2}, + {instance: input[1], token: 3}, + {instance: input[1], token: 4}, + {instance: input[0], token: 5}, + {instance: input[0], token: 6}, + {instance: input[1], token: 7}, + {instance: input[2], token: 8}, + {instance: input[0], token: 9}, + } + + var i int + it := newInstanceSortMergeIterator(input) + for it.Next() { + require.Equal(t, expected[i], it.At()) + i++ + } +} + +func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) { + // instance token ranges do not overlap + t.Run("non-overlapping", func(t *testing.T) { + groups := []*logproto.GroupedChunkRefs{ + {Fingerprint: 0}, + {Fingerprint: 100}, + {Fingerprint: 101}, + {Fingerprint: 200}, + {Fingerprint: 201}, + {Fingerprint: 300}, + {Fingerprint: 301}, + {Fingerprint: 400}, + {Fingerprint: 401}, // out of bounds, will be dismissed + } + servers := []addrsWithTokenRange{ + {id: "instance-1", addrs: 
[]string{"10.0.0.1"}, minToken: 0, maxToken: 100}, + {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 101, maxToken: 200}, + {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: 201, maxToken: 300}, + {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 301, maxToken: 400}, + } + + // partition fingerprints + + expected := []instanceWithFingerprints{ + { + instance: servers[0], + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0}, + {Fingerprint: 100}, + }, + }, + { + instance: servers[1], + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 101}, + {Fingerprint: 200}, + }, + }, + { + instance: servers[2], + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 201}, + {Fingerprint: 300}, + }, + }, + { + instance: servers[3], + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 301}, + {Fingerprint: 400}, + }, + }, + } + + bounded := partitionFingerprintsByAddresses(groups, servers) + require.Equal(t, expected, bounded) + + // group fingerprints by instance + + expected = []instanceWithFingerprints{ + { + instance: addrsWithTokenRange{id: "instance-1", addrs: []string{"10.0.0.1"}}, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0}, + {Fingerprint: 100}, + }, + }, + { + instance: addrsWithTokenRange{id: "instance-2", addrs: []string{"10.0.0.2"}}, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 101}, + {Fingerprint: 200}, + {Fingerprint: 301}, + {Fingerprint: 400}, + }, + }, + { + instance: addrsWithTokenRange{id: "instance-3", addrs: []string{"10.0.0.3"}}, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 201}, + {Fingerprint: 300}, + }, + }, + } + result := groupByInstance(bounded) + require.Equal(t, expected, result) + }) + + // instance token ranges overlap + t.Run("overlapping", func(t *testing.T) { + groups := []*logproto.GroupedChunkRefs{ + {Fingerprint: 50}, + {Fingerprint: 150}, + {Fingerprint: 250}, + {Fingerprint: 350}, + } + servers := []addrsWithTokenRange{ + {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 200}, + {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 100, maxToken: 300}, + {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: 200, maxToken: 400}, + } + + // partition fingerprints + + expected := []instanceWithFingerprints{ + {instance: servers[0], fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 50}, + {Fingerprint: 150}, + }}, + {instance: servers[1], fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 150}, + {Fingerprint: 250}, + }}, + {instance: servers[2], fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 250}, + {Fingerprint: 350}, + }}, + } + + bounded := partitionFingerprintsByAddresses(groups, servers) + require.Equal(t, expected, bounded) + }) +} + +func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) { logger := log.NewNopLogger() reg := prometheus.NewRegistry() @@ -40,75 +194,212 @@ func TestBloomGatewayClient_GroupStreamsByAddresses(t *testing.T) { cfg := ClientConfig{} flagext.DefaultValues(&cfg) - c, err := NewGatewayClient(cfg, l, reg, logger, "loki") + c, err := NewGatewayClient(cfg, l, reg, logger, "loki", nil, false) require.NoError(t, err) + instances := []ring.InstanceDesc{ + {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{2146405214, 1029997044, 678878693}}, + {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{296463531, 1697323986, 800258284}}, + {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{2014002871, 315617625, 1036168527}}, + } + + it := 
newInstanceSortMergeIterator(instances) + for it.Next() { + t.Log(it.At().token, it.At().instance.Addr) + } + testCases := []struct { - name string - chunks []*logproto.GroupedChunkRefs - addresses [][]string - expected []chunkRefsByAddrs + name string + chunks []*logproto.GroupedChunkRefs + expected []instanceWithFingerprints }{ { - name: "empty input yields empty result", - chunks: []*logproto.GroupedChunkRefs{}, - addresses: [][]string{}, - expected: []chunkRefsByAddrs{}, + name: "empty input yields empty result", + chunks: []*logproto.GroupedChunkRefs{}, + expected: []instanceWithFingerprints{}, }, { - name: "addresses with same elements are grouped into single item", + name: "fingerprints within a single token range are grouped", chunks: []*logproto.GroupedChunkRefs{ - {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}}, - {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}}, - {Fingerprint: 3, Refs: []*logproto.ShortRef{{Checksum: 3}}}, + {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + {Fingerprint: 1000000001, Refs: []*logproto.ShortRef{{Checksum: 2}}}, }, - addresses: [][]string{ - {"10.0.0.1", "10.0.0.2", "10.0.0.3"}, - {"10.0.0.2", "10.0.0.3", "10.0.0.1"}, - {"10.0.0.3", "10.0.0.1", "10.0.0.2"}, - }, - expected: []chunkRefsByAddrs{ + expected: []instanceWithFingerprints{ { - addrs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}, - refs: []*logproto.GroupedChunkRefs{ - {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}}, - {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}}, - {Fingerprint: 3, Refs: []*logproto.ShortRef{{Checksum: 3}}}, + instance: addrsWithTokenRange{ + id: "instance-1", + addrs: []string{"10.0.0.1"}, + }, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + {Fingerprint: 1000000001, Refs: []*logproto.ShortRef{{Checksum: 2}}}, }, }, }, }, { - name: "partially overlapping addresses are not grouped together", + name: "fingerprints within multiple token ranges of a single instance are grouped", chunks: []*logproto.GroupedChunkRefs{ - {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}}, - {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}}, + {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}}, }, - addresses: [][]string{ - {"10.0.0.1", "10.0.0.2"}, - {"10.0.0.2", "10.0.0.3"}, + expected: []instanceWithFingerprints{ + { + instance: addrsWithTokenRange{ + id: "instance-1", + addrs: []string{"10.0.0.1"}, + }, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}}, + }, + }, + }, + }, + { + name: "fingerprints with token ranges of a multiple instance are grouped", + chunks: []*logproto.GroupedChunkRefs{ + // instance 1 + {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + // instance 1 + {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}}, + // instance 2 + {Fingerprint: 290000000, Refs: []*logproto.ShortRef{{Checksum: 3}}}, + // instance 2 (fingerprint equals instance token) + {Fingerprint: 800258284, Refs: []*logproto.ShortRef{{Checksum: 4}}}, + // instance 2 (fingerprint greater than greatest token) + {Fingerprint: 2147483648, Refs: []*logproto.ShortRef{{Checksum: 5}}}, + // instance 3 + {Fingerprint: 1029997045, Refs: []*logproto.ShortRef{{Checksum: 6}}}, }, - expected: 
[]chunkRefsByAddrs{ + expected: []instanceWithFingerprints{ { - addrs: []string{"10.0.0.1", "10.0.0.2"}, - refs: []*logproto.GroupedChunkRefs{ - {Fingerprint: 1, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + instance: addrsWithTokenRange{ + id: "instance-2", + addrs: []string{"10.0.0.2"}, + }, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 290000000, Refs: []*logproto.ShortRef{{Checksum: 3}}}, + {Fingerprint: 800258284, Refs: []*logproto.ShortRef{{Checksum: 4}}}, + {Fingerprint: 2147483648, Refs: []*logproto.ShortRef{{Checksum: 5}}}, }, }, { - addrs: []string{"10.0.0.2", "10.0.0.3"}, - refs: []*logproto.GroupedChunkRefs{ - {Fingerprint: 2, Refs: []*logproto.ShortRef{{Checksum: 2}}}, + instance: addrsWithTokenRange{ + id: "instance-1", + addrs: []string{"10.0.0.1"}, + }, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}}, + {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}}, + }, + }, + { + instance: addrsWithTokenRange{ + id: "instance-3", + addrs: []string{"10.0.0.3"}, + }, + fingerprints: []*logproto.GroupedChunkRefs{ + {Fingerprint: 1029997045, Refs: []*logproto.ShortRef{{Checksum: 6}}}, }, }, }, }, } + + subRing := newMockRing(instances) for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - res := c.groupStreamsByAddr(tc.chunks, tc.addresses) + // sort chunks here, to be able to write more human readable test input + sort.Slice(tc.chunks, func(i, j int) bool { + return tc.chunks[i].Fingerprint < tc.chunks[j].Fingerprint + }) + + res, err := c.groupFingerprintsByServer(tc.chunks, subRing, instances) + require.NoError(t, err) require.Equal(t, tc.expected, res) }) } } + +// make sure mockRing implements the ring.ReadRing interface +var _ ring.ReadRing = &mockRing{} + +func newMockRing(instances []ring.InstanceDesc) *mockRing { + it := newInstanceSortMergeIterator(instances) + ranges := make([]instanceWithToken, 0) + for it.Next() { + ranges = append(ranges, it.At()) + } + return &mockRing{ + instances: instances, + ranges: ranges, + } +} + +type mockRing struct { + instances []ring.InstanceDesc + ranges []instanceWithToken +} + +// Get implements ring.ReadRing. +func (r *mockRing) Get(key uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) { + idx, _ := sort.Find(len(r.ranges), func(i int) int { + if r.ranges[i].token < key { + return 1 + } + if r.ranges[i].token > key { + return -1 + } + return 0 + }) + return ring.ReplicationSet{Instances: []ring.InstanceDesc{r.ranges[idx].instance}}, nil +} + +// GetAllHealthy implements ring.ReadRing. +func (r *mockRing) GetAllHealthy(_ ring.Operation) (ring.ReplicationSet, error) { + return ring.ReplicationSet{ + Instances: r.instances, + }, nil +} + +// GetInstanceState implements ring.ReadRing. +func (*mockRing) GetInstanceState(_ string) (ring.InstanceState, error) { + panic("unimplemented") +} + +// GetReplicationSetForOperation implements ring.ReadRing. +func (*mockRing) GetReplicationSetForOperation(_ ring.Operation) (ring.ReplicationSet, error) { + panic("unimplemented") +} + +// HasInstance implements ring.ReadRing. +func (*mockRing) HasInstance(_ string) bool { + panic("unimplemented") +} + +// InstancesCount implements ring.ReadRing. +func (r *mockRing) InstancesCount() int { + return len(r.instances) +} + +// ReplicationFactor implements ring.ReadRing. +func (*mockRing) ReplicationFactor() int { + return 1 +} + +// ShuffleShard implements ring.ReadRing. 
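TestBloomGatewayClient_SortInstancesByToken above pins down the contract of newInstanceSortMergeIterator: every (instance, token) pair comes out in ascending token order regardless of per-instance ordering. A compact stand-in that produces the same ordering with a single container/heap, whereas the PR's iterator merges peeking iterators lazily:

```go
package main

import (
	"container/heap"
	"fmt"
	"sort"
)

type pair struct {
	token uint32
	id    string
}

type tokenHeap []pair

func (h tokenHeap) Len() int           { return len(h) }
func (h tokenHeap) Less(i, j int) bool { return h[i].token < h[j].token }
func (h tokenHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *tokenHeap) Push(x any)        { *h = append(*h, x.(pair)) }
func (h *tokenHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// mergeTokens returns every (token, instance) pair in ascending token order,
// the property the test above asserts for newInstanceSortMergeIterator.
func mergeTokens(instances map[string][]uint32) []pair {
	h := &tokenHeap{}
	for id, tokens := range instances {
		sort.Slice(tokens, func(a, b int) bool { return tokens[a] < tokens[b] })
		for _, t := range tokens {
			*h = append(*h, pair{t, id})
		}
	}
	heap.Init(h)
	out := make([]pair, 0, h.Len())
	for h.Len() > 0 {
		out = append(out, heap.Pop(h).(pair))
	}
	return out
}

func main() {
	fmt.Println(mergeTokens(map[string][]uint32{
		"1": {6, 5, 2, 9},
		"2": {3, 4, 7},
		"3": {1, 8, 0},
	}))
}
```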
+func (*mockRing) ShuffleShard(_ string, _ int) ring.ReadRing { + panic("unimplemented") +} + +// ShuffleShardWithLookback implements ring.ReadRing. +func (*mockRing) ShuffleShardWithLookback(_ string, _ int, _ time.Duration, _ time.Time) ring.ReadRing { + panic("unimplemented") +} + +// CleanupShuffleShardCache implements ring.ReadRing. +func (*mockRing) CleanupShuffleShardCache(_ string) { + panic("unimplemented") +} diff --git a/pkg/bloomgateway/config.go b/pkg/bloomgateway/config.go index e5d35c42edf5e..3eb94324bd7e8 100644 --- a/pkg/bloomgateway/config.go +++ b/pkg/bloomgateway/config.go @@ -38,6 +38,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { } type Limits interface { + CacheLimits BloomGatewayShardSize(tenantID string) int BloomGatewayEnabled(tenantID string) bool BloomGatewayBlocksDownloadingParallelism(tenantID string) int diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go index ab7b9eb40500e..ec9e2a45842d6 100644 --- a/pkg/bloomgateway/querier.go +++ b/pkg/bloomgateway/querier.go @@ -31,9 +31,9 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from return chunkRefs, nil } - // TODO(chaudum): Make buffer pool to reduce allocations. // The indexes of the chunks slice correspond to the indexes of the fingerprint slice. - grouped := make([]*logproto.GroupedChunkRefs, 0, len(chunkRefs)) + grouped := groupedChunksRefPool.Get(len(chunkRefs)) + defer groupedChunksRefPool.Put(grouped) grouped = groupChunkRefs(chunkRefs, grouped) refs, err := bq.c.FilterChunks(ctx, tenant, from, through, grouped, filters...) @@ -41,8 +41,6 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from return nil, err } - // TODO(chaudum): Cache response - // Flatten response from client and return result := make([]*logproto.ChunkRef, 0, len(chunkRefs)) for i := range refs { diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go index 87187e071b82d..dc95da534d0a6 100644 --- a/pkg/bloomgateway/util.go +++ b/pkg/bloomgateway/util.go @@ -5,6 +5,7 @@ import ( "time" "github.com/prometheus/common/model" + "golang.org/x/exp/slices" "github.com/grafana/loki/pkg/logproto" v1 "github.com/grafana/loki/pkg/storage/bloom/v1" @@ -76,15 +77,22 @@ func getDayTime(ts model.Time) time.Time { return time.Date(ts.Time().Year(), ts.Time().Month(), ts.Time().Day(), 0, 0, 0, 0, time.UTC) } -// TODO(chaudum): Fix Through time calculation // getFromThrough assumes a list of ShortRefs sorted by From time -// However, it does also assume that the last item has the highest -// Through time, which might not be the case! 
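The querier hunk above rents its grouped-refs slice from groupedChunksRefPool instead of allocating per request. A generic sketch of the idea on top of sync.Pool; the PR actually uses Loki's queue.NewSlicePool with power-of-two buckets from 64 to 65536, so this stand-in is deliberately simpler:

```go
package main

import (
	"fmt"
	"sync"
)

// slicePool hands out zero-length slices with at least the requested capacity.
type slicePool[T any] struct {
	pool sync.Pool
}

func newSlicePool[T any](capacity int) *slicePool[T] {
	return &slicePool[T]{pool: sync.Pool{
		New: func() any { s := make([]T, 0, capacity); return &s },
	}}
}

// Get returns a slice with cap >= n and len 0.
func (p *slicePool[T]) Get(n int) []T {
	s := *(p.pool.Get().(*[]T))
	if cap(s) < n {
		s = make([]T, 0, n)
	}
	return s[:0]
}

// Put resets the slice and returns it to the pool for reuse.
func (p *slicePool[T]) Put(s []T) {
	s = s[:0]
	p.pool.Put(&s)
}

func main() {
	pool := newSlicePool[int](64)
	buf := pool.Get(10)
	defer pool.Put(buf)
	buf = append(buf, 1, 2, 3)
	fmt.Println(len(buf), cap(buf) >= 10)
}
```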
func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) { if len(refs) == 0 { return model.Earliest, model.Latest } - return refs[0].From, refs[len(refs)-1].Through + + maxItem := slices.MaxFunc(refs, func(a, b *logproto.ShortRef) int { + if a.Through > b.Through { + return 1 + } else if a.Through < b.Through { + return -1 + } + return 0 + }) + + return refs[0].From, maxItem.Through } // convertToSearches converts a list of line filter expressions to a list of diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go index 1424c56a19153..70e3d89eb2143 100644 --- a/pkg/bloomgateway/util_test.go +++ b/pkg/bloomgateway/util_test.go @@ -3,6 +3,7 @@ package bloomgateway import ( "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/logproto" @@ -37,6 +38,23 @@ func TestSliceIterWithIndex(t *testing.T) { }) } +func TestGetFromThrough(t *testing.T) { + chunks := []*logproto.ShortRef{ + {From: 0, Through: 6}, + {From: 1, Through: 5}, + {From: 2, Through: 9}, + {From: 3, Through: 8}, + {From: 4, Through: 7}, + } + from, through := getFromThrough(chunks) + require.Equal(t, model.Time(0), from) + require.Equal(t, model.Time(9), through) + + // assert that slice order did not change + require.Equal(t, model.Time(0), chunks[0].From) + require.Equal(t, model.Time(4), chunks[len(chunks)-1].From) +} + func mkBlockRef(minFp, maxFp uint64) bloomshipper.BlockRef { return bloomshipper.BlockRef{ Ref: bloomshipper.Ref{ diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go index f39632b1219ff..9de580166ea4d 100644 --- a/pkg/bloomgateway/worker.go +++ b/pkg/bloomgateway/worker.go @@ -20,6 +20,8 @@ import ( type workerConfig struct { maxWaitTime time.Duration maxItems int + + processBlocksSequentially bool } type workerMetrics struct { @@ -186,13 +188,11 @@ func (w *worker) running(ctx context.Context) error { blockRefs = append(blockRefs, b.blockRef) } - // GetBlockQueriersForBlockRefs() waits until all blocks are downloaded and available for querying. - // TODO(chaudum): Add API that allows to process blocks as soon as they become available. - // This will require to change the taskMergeIterator to a slice of requests so we can seek - // to the appropriate fingerprint range within the slice that matches the block's fingerprint range. - storeFetchStart = time.Now() - blockQueriers, err := w.store.GetBlockQueriersForBlockRefs(taskCtx, tasks[0].Tenant, blockRefs) - w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockQueriersForBlockRefs").Observe(time.Since(storeFetchStart).Seconds()) + if w.cfg.processBlocksSequentially { + err = w.processBlocksSequentially(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs) + } else { + err = w.processBlocksWithCallback(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs) + } if err != nil { for _, t := range tasks { t.ErrCh <- err @@ -200,17 +200,6 @@ func (w *worker) running(ctx context.Context) error { // continue with tasks of next day continue } - - for i, blockQuerier := range blockQueriers { - it := newTaskMergeIterator(day, boundedRefs[i].tasks...) 
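The fixed getFromThrough above no longer assumes the last ref carries the highest Through; it scans for the maximum with slices.MaxFunc while leaving the slice order intact, exactly what TestGetFromThrough checks. The same shape on plain integers (this sketch uses the stdlib slices package from Go 1.21+, while the PR imports golang.org/x/exp/slices):

```go
package main

import (
	"fmt"
	"slices"
)

type ref struct{ from, through int64 }

// fromThrough assumes refs are sorted by from; the max through can sit anywhere.
func fromThrough(refs []ref) (int64, int64) {
	if len(refs) == 0 {
		return 0, 0
	}
	maxItem := slices.MaxFunc(refs, func(a, b ref) int {
		switch {
		case a.through > b.through:
			return 1
		case a.through < b.through:
			return -1
		}
		return 0
	})
	return refs[0].from, maxItem.through
}

func main() {
	refs := []ref{{0, 6}, {1, 5}, {2, 9}, {3, 8}, {4, 7}}
	fmt.Println(fromThrough(refs)) // 0 9
}
```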
- fq := blockQuerier.Fuse([]v1.PeekingIterator[v1.Request]{it}) - err := fq.Run() - if err != nil { - for _, t := range boundedRefs[i].tasks { - t.ErrCh <- errors.Wrap(err, "failed to run chunk check") - } - } - } } // return dequeued items back to the pool @@ -225,3 +214,40 @@ func (w *worker) stopping(err error) error { w.queue.UnregisterConsumerConnection(w.id) return nil } + +func (w *worker) processBlocksWithCallback(taskCtx context.Context, tenant string, day time.Time, blockRefs []bloomshipper.BlockRef, boundedRefs []boundedTasks) error { + return w.store.ForEach(taskCtx, tenant, blockRefs, func(bq *v1.BlockQuerier, minFp, maxFp uint64) error { + for _, b := range boundedRefs { + if b.blockRef.MinFingerprint == minFp && b.blockRef.MaxFingerprint == maxFp { + processBlock(bq, day, b.tasks) + return nil + } + } + return nil + }) +} + +func (w *worker) processBlocksSequentially(taskCtx context.Context, tenant string, day time.Time, blockRefs []bloomshipper.BlockRef, boundedRefs []boundedTasks) error { + storeFetchStart := time.Now() + blockQueriers, err := w.store.GetBlockQueriersForBlockRefs(taskCtx, tenant, blockRefs) + w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockQueriersForBlockRefs").Observe(time.Since(storeFetchStart).Seconds()) + if err != nil { + return err + } + + for i := range blockQueriers { + processBlock(blockQueriers[i].BlockQuerier, day, boundedRefs[i].tasks) + } + return nil +} + +func processBlock(blockQuerier *v1.BlockQuerier, day time.Time, tasks []Task) { + it := newTaskMergeIterator(day, tasks...) + fq := blockQuerier.Fuse([]v1.PeekingIterator[v1.Request]{it}) + err := fq.Run() + if err != nil { + for _, t := range tasks { + t.ErrCh <- errors.Wrap(err, "failed to run chunk check") + } + } +} diff --git a/pkg/compactor/metrics.go b/pkg/compactor/metrics.go index 28b205789693c..96fc9b16541e1 100644 --- a/pkg/compactor/metrics.go +++ b/pkg/compactor/metrics.go @@ -49,7 +49,7 @@ func newMetrics(r prometheus.Registerer) *metrics { Help: "Time (in seconds) spent in applying retention", }), applyRetentionLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Namespace: "loki_boltdb_shipper", + Namespace: "loki_compactor", Name: "apply_retention_last_successful_run_timestamp_seconds", Help: "Unix timestamp of the last successful retention run", }), diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 147262ff22e51..29875c4847aad 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -37,6 +37,7 @@ import ( "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/runtime" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/chunk" @@ -851,6 +852,16 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie // initialize stats collection for ingester queries. 
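The ingester changes that follow backfill req.Plan whenever an older client sends only a string selector, so downstream code can rely on an AST being present. The guard, extracted into a helper for illustration (backfillPlan is a hypothetical name; the parse-and-assign lines mirror the diff and the sketch is only runnable inside the Loki module):

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/querier/plan"
)

// backfillPlan mirrors the guard added to Query/QuerySample/Tail: when the
// request carries no plan, parse the selector into one before proceeding.
func backfillPlan(req *logproto.QueryRequest) error {
	if req.Plan != nil {
		return nil
	}
	parsed, err := syntax.ParseLogSelector(req.Selector, true)
	if err != nil {
		return err
	}
	req.Plan = &plan.QueryPlan{AST: parsed}
	return nil
}

func main() {
	req := &logproto.QueryRequest{Selector: `{job="loki"}`}
	if err := backfillPlan(req); err != nil {
		panic(err)
	}
	fmt.Println("plan set:", req.Plan != nil)
}
```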
_, ctx := stats.NewContext(queryServer.Context()) + if req.Plan == nil { + parsed, err := syntax.ParseLogSelector(req.Selector, true) + if err != nil { + return err + } + req.Plan = &plan.QueryPlan{ + AST: parsed, + } + } + instanceID, err := tenant.TenantID(ctx) if err != nil { return err @@ -874,6 +885,7 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie Limit: req.Limit, Shards: req.Shards, Deletes: req.Deletes, + Plan: req.Plan, }} storeItr, err := i.store.SelectLogs(ctx, storeReq) if err != nil { @@ -900,6 +912,17 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log _, ctx := stats.NewContext(queryServer.Context()) sp := opentracing.SpanFromContext(ctx) + // If the plan is empty we want all series to be returned. + if req.Plan == nil { + parsed, err := syntax.ParseSampleExpr(req.Selector) + if err != nil { + return err + } + req.Plan = &plan.QueryPlan{ + AST: parsed, + } + } + instanceID, err := tenant.TenantID(ctx) if err != nil { return err @@ -925,6 +948,7 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log Selector: req.Selector, Shards: req.Shards, Deletes: req.Deletes, + Plan: req.Plan, }} storeItr, err := i.store.SelectSamples(ctx, storeReq) if err != nil { @@ -1234,6 +1258,16 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_ default: } + if req.Plan == nil { + parsed, err := syntax.ParseLogSelector(req.Query, true) + if err != nil { + return err + } + req.Plan = &plan.QueryPlan{ + AST: parsed, + } + } + instanceID, err := tenant.TenantID(queryServer.Context()) if err != nil { return err @@ -1243,7 +1277,13 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_ if err != nil { return err } - tailer, err := newTailer(instanceID, req.Query, queryServer, i.cfg.MaxDroppedStreams) + + expr, ok := req.Plan.AST.(syntax.LogSelectorExpr) + if !ok { + return fmt.Errorf("unsupported query expression: want (LogSelectorExpr), got (%T)", req.Plan.AST) + } + + tailer, err := newTailer(instanceID, expr, queryServer, i.cfg.MaxDroppedStreams) if err != nil { return err } diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 3da4cd356daab..0e43daf2cde42 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -35,6 +35,8 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/runtime" "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/chunk/fetcher" @@ -812,6 +814,9 @@ func Test_DedupeIngester(t *testing.T) { End: time.Unix(0, requests+1), Limit: uint32(requests * streamCount), Direction: logproto.BACKWARD, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"} | label_format bar=""`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewQueryClientIterator(stream, logproto.BACKWARD)) @@ -870,6 +875,9 @@ func Test_DedupeIngester(t *testing.T) { Selector: `sum(rate({foo="bar"}[1m])) by (bar)`, Start: time.Unix(0, 0), End: time.Unix(0, requests+1), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum(rate({foo="bar"}[1m])) by (bar)`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewSampleQueryClientIterator(stream)) @@ -905,6 +913,9 @@ func Test_DedupeIngester(t *testing.T) { Selector: `sum(rate({foo="bar"}[1m]))`, Start: 
time.Unix(0, 0), End: time.Unix(0, requests+1), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum(rate({foo="bar"}[1m]))`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewSampleQueryClientIterator(stream)) @@ -965,6 +976,9 @@ func Test_DedupeIngesterParser(t *testing.T) { End: time.Unix(0, int64(requests+1)), Limit: uint32(requests * streamCount * 2), Direction: logproto.BACKWARD, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"} | json`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewQueryClientIterator(stream, logproto.BACKWARD)) @@ -992,6 +1006,9 @@ func Test_DedupeIngesterParser(t *testing.T) { End: time.Unix(0, int64(requests+1)), Limit: uint32(requests * streamCount * 2), Direction: logproto.FORWARD, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"} | json`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewQueryClientIterator(stream, logproto.FORWARD)) @@ -1016,6 +1033,9 @@ func Test_DedupeIngesterParser(t *testing.T) { Selector: `rate({foo="bar"} | json [1m])`, Start: time.Unix(0, 0), End: time.Unix(0, int64(requests+1)), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`rate({foo="bar"} | json [1m])`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewSampleQueryClientIterator(stream)) @@ -1041,6 +1061,9 @@ func Test_DedupeIngesterParser(t *testing.T) { Selector: `sum by (c,d,e,foo) (rate({foo="bar"} | json [1m]))`, Start: time.Unix(0, 0), End: time.Unix(0, int64(requests+1)), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`sum by (c,d,e,foo) (rate({foo="bar"} | json [1m]))`), + }, }) require.NoError(t, err) iterators = append(iterators, iter.NewSampleQueryClientIterator(stream)) diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index ac29f3516df45..492f78404cb92 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -21,6 +21,7 @@ import ( "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/querier/astmapper" + "github.com/grafana/loki/pkg/querier/plan" loki_runtime "github.com/grafana/loki/pkg/runtime" "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/config" @@ -537,7 +538,9 @@ func Benchmark_instance_addNewTailer(b *testing.B) { ctx := context.Background() inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, NewStreamRateCalculator(), nil) - t, err := newTailer("foo", `{namespace="foo",pod="bar",instance=~"10.*"}`, nil, 10) + expr, err := syntax.ParseLogSelector(`{namespace="foo",pod="bar",instance=~"10.*"}`, true) + require.NoError(b, err) + t, err := newTailer("foo", expr, nil, 10) require.NoError(b, err) for i := 0; i < 10000; i++ { require.NoError(b, inst.Push(ctx, &logproto.PushRequest{ @@ -596,6 +599,9 @@ func Test_Iterator(t *testing.T) { Start: time.Unix(0, 0), End: time.Unix(0, 100000000), Direction: logproto.BACKWARD, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{job="3"} | logfmt`), + }, }, }, ) @@ -648,6 +654,9 @@ func Test_ChunkFilter(t *testing.T) { Start: time.Unix(0, 0), End: time.Unix(0, 100000000), Direction: logproto.BACKWARD, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{job="3"}`), + }, }, }, ) @@ -690,6 +699,9 @@ func Test_QueryWithDelete(t *testing.T) { End: 10 * 1e6, }, }, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{job="3"}`), + }, }, }, ) @@ -730,6 
+742,9 @@ func Test_QuerySampleWithDelete(t *testing.T) { End: 10 * 1e6, }, }, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`count_over_time({job="3"}[5m])`), + }, }, }, ) diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go index 641fd1c926523..d1b01f22746c2 100644 --- a/pkg/ingester/stream_test.go +++ b/pkg/ingester/stream_test.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/log" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/util/flagext" "github.com/grafana/loki/pkg/validation" ) @@ -524,7 +525,9 @@ func Benchmark_PushStream(b *testing.B) { chunkfmt, headfmt := defaultChunkFormat(b) s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil) - t, err := newTailer("foo", `{namespace="loki-dev"}`, &fakeTailServer{}, 10) + expr, err := syntax.ParseLogSelector(`{namespace="loki-dev"}`, true) + require.NoError(b, err) + t, err := newTailer("foo", expr, &fakeTailServer{}, 10) require.NoError(b, err) go t.loop() diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index 72e7026e810e7..3e9a8a64cfd88 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -46,11 +46,7 @@ type tailer struct { conn TailServer } -func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*tailer, error) { - expr, err := syntax.ParseLogSelector(query, true) - if err != nil { - return nil, err - } +func newTailer(orgID string, expr syntax.LogSelectorExpr, conn TailServer, maxDroppedStreams int) (*tailer, error) { // Make sure we can build a pipeline. The stream processing code doesn't have a place to handle // this error so make sure we handle it here. 
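	// newTailer now receives an already-parsed syntax.LogSelectorExpr instead
	// of a raw query string, so parse errors surface at the call site and the
	// only failure mode left here is pipeline construction. A hedged sketch of
	// the new call site (tenant ID, selector, and server value are
	// illustrative):
	//
	//	expr, err := syntax.ParseLogSelector(`{app="foo"} |= "foo"`, true)
	//	if err != nil {
	//		return err
	//	}
	//	tail, err := newTailer("tenant-1", expr, server, 10)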
pipeline, err := expr.Pipeline() @@ -66,7 +62,7 @@ func newTailer(orgID, query string, conn TailServer, maxDroppedStreams int) (*ta conn: conn, droppedStreams: make([]*logproto.DroppedStream, 0, maxDroppedStreams), maxDroppedStreams: maxDroppedStreams, - id: generateUniqueID(orgID, query), + id: generateUniqueID(orgID, expr.String()), closeChan: make(chan struct{}), pipeline: pipeline, }, nil diff --git a/pkg/ingester/tailer_test.go b/pkg/ingester/tailer_test.go index 59293352030df..674dde3df8af0 100644 --- a/pkg/ingester/tailer_test.go +++ b/pkg/ingester/tailer_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/syntax" ) func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) { @@ -26,7 +27,9 @@ func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) { } for run := 0; run < runs; run++ { - tailer, err := newTailer("org-id", stream.Labels, nil, 10) + expr, err := syntax.ParseLogSelector(stream.Labels, true) + require.NoError(t, err) + tailer, err := newTailer("org-id", expr, nil, 10) require.NoError(t, err) require.NotNil(t, tailer) @@ -78,7 +81,9 @@ func Test_dropstream(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - tail, err := newTailer("foo", `{app="foo"} |= "foo"`, &fakeTailServer{}, maxDroppedStreams) + expr, err := syntax.ParseLogSelector(`{app="foo"} |= "foo"`, true) + require.NoError(t, err) + tail, err := newTailer("foo", expr, &fakeTailServer{}, maxDroppedStreams) require.NoError(t, err) for i := 0; i < c.drop; i++ { @@ -114,7 +119,9 @@ func (f *fakeTailServer) Reset() { } func Test_TailerSendRace(t *testing.T) { - tail, err := newTailer("foo", `{app="foo"} |= "foo"`, &fakeTailServer{}, 10) + expr, err := syntax.ParseLogSelector(`{app="foo"} |= "foo"`, true) + require.NoError(t, err) + tail, err := newTailer("foo", expr, &fakeTailServer{}, 10) require.NoError(t, err) var wg sync.WaitGroup @@ -250,7 +257,9 @@ func Test_StructuredMetadata(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { var server fakeTailServer - tail, err := newTailer("foo", tc.query, &server, 10) + expr, err := syntax.ParseLogSelector(tc.query, true) + require.NoError(t, err) + tail, err := newTailer("foo", expr, &server, 10) require.NoError(t, err) var wg sync.WaitGroup diff --git a/pkg/loghttp/tail.go b/pkg/loghttp/tail.go index 6b9b5ad7d131d..9ad2219b10979 100644 --- a/pkg/loghttp/tail.go +++ b/pkg/loghttp/tail.go @@ -11,6 +11,8 @@ import ( "github.com/grafana/dskit/httpgrpc" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/querier/plan" ) const ( @@ -67,8 +69,16 @@ func (s *DroppedStream) UnmarshalJSON(data []byte) error { // ParseTailQuery parses a TailRequest request from an http request. 
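// After this change a tail request carries both the raw query string (kept
// for wire compatibility) and its parsed AST. A sketch of the resulting
// message for a hypothetical selector:
//
//	req := logproto.TailRequest{
//		Query: `{foo="bar"}`,
//		Plan: &plan.QueryPlan{
//			AST: syntax.MustParseExpr(`{foo="bar"}`),
//		},
//	}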
func ParseTailQuery(r *http.Request) (*logproto.TailRequest, error) { var err error + qs := query(r) + parsed, err := syntax.ParseExpr(qs) + if err != nil { + return nil, err + } req := logproto.TailRequest{ - Query: query(r), + Query: qs, + Plan: &plan.QueryPlan{ + AST: parsed, + }, } req.Query, err = parseRegexQuery(r) diff --git a/pkg/loghttp/tail_test.go b/pkg/loghttp/tail_test.go index f5b2039723699..6fe7163116675 100644 --- a/pkg/loghttp/tail_test.go +++ b/pkg/loghttp/tail_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/querier/plan" ) func TestParseTailQuery(t *testing.T) { @@ -38,6 +40,9 @@ func TestParseTailQuery(t *testing.T) { DelayFor: 5, Start: time.Date(2017, 06, 10, 21, 42, 24, 760738998, time.UTC), Limit: 1000, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"}`), + }, }, false}, } for _, tt := range tests { diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index fdf6f6b169cd7..268e588d3455c 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -1,6 +1,7 @@ package logproto import ( + "encoding/binary" stdjson "encoding/json" "fmt" "math" @@ -10,6 +11,7 @@ import ( "time" "unsafe" + "github.com/cespare/xxhash/v2" jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" @@ -18,6 +20,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util" ) @@ -260,6 +263,11 @@ func (m *IndexStatsRequest) WithStartEnd(start, end time.Time) definitions.Reque return &clone } +// WithStartEndForCache implements resultscache.Request. +func (m *IndexStatsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) +} + // WithQuery clone the current request with a different query. func (m *IndexStatsRequest) WithQuery(query string) definitions.Request { clone := *m @@ -308,6 +316,11 @@ func (m *VolumeRequest) WithStartEnd(start, end time.Time) definitions.Request { return &clone } +// WithStartEndForCache implements resultscache.Request. +func (m *VolumeRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) +} + // WithQuery clone the current request with a different query. func (m *VolumeRequest) WithQuery(query string) definitions.Request { clone := *m @@ -323,3 +336,83 @@ func (m *VolumeRequest) LogToSpan(sp opentracing.Span) { otlog.String("end", timestamp.Time(int64(m.Through)).String()), ) } + +// Satisfy definitions.Request for FilterChunkRefRequest + +// GetStart returns the start timestamp of the request in milliseconds. +func (m *FilterChunkRefRequest) GetStart() time.Time { + return time.UnixMilli(int64(m.From)) +} + +// GetEnd returns the end timestamp of the request in milliseconds. +func (m *FilterChunkRefRequest) GetEnd() time.Time { + return time.UnixMilli(int64(m.Through)) +} + +// GetStep returns the step of the request in milliseconds. Always 0. +func (m *FilterChunkRefRequest) GetStep() int64 { + return 0 +} + +// GetQuery returns the query of the request. +// The query is the hash for the input chunks refs and the filter expressions. 
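+// The key is "<xxhash of the ref fingerprints>" when the request carries no
+// filters, and "<hash>/<op>-<match>[,<op>-<match>...]" otherwise. Concrete
+// values, taken from the tests added further below:
+//
+//	(&FilterChunkRefRequest{}).GetQuery()
+//	// => "0"
+//	(&FilterChunkRefRequest{Filters: []*LineFilterExpression{
+//		{Operator: 0, Match: "uuid"},
+//	}}).GetQuery()
+//	// => "0/0-uuid" (no chunk refs, so the hash component is 0)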
+func (m *FilterChunkRefRequest) GetQuery() string { + var encodeBuf []byte + var chunksHash uint64 + if len(m.Refs) > 0 { + h := xxhash.New() + for _, ref := range m.Refs { + _, _ = h.Write(binary.AppendUvarint(encodeBuf[:0], ref.Fingerprint)) + } + chunksHash = h.Sum64() + } + + // Short circuit if there are no filters. + if len(m.Filters) == 0 { + return fmt.Sprintf("%d", chunksHash) + } + + var sb strings.Builder + for i, filter := range m.Filters { + if i > 0 { + sb.WriteString(",") + } + sb.Write(fmt.Appendf(encodeBuf[:0], "%d", filter.Operator)) + sb.WriteString("-") + sb.WriteString(filter.Match) + } + + return fmt.Sprintf("%d/%s", chunksHash, sb.String()) +} + +// GetCachingOptions returns the caching options. +func (m *FilterChunkRefRequest) GetCachingOptions() (res resultscache.CachingOptions) { return } + +// WithStartEndForCache implements resultscache.Request. +func (m *FilterChunkRefRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + // We Remove the chunks that are not within the given time range. + chunkRefs := make([]*GroupedChunkRefs, 0, len(m.Refs)) + for _, chunkRef := range m.Refs { + refs := make([]*ShortRef, 0, len(chunkRef.Refs)) + for _, ref := range chunkRef.Refs { + if end.Before(ref.From.Time()) || ref.Through.Time().Before(start) { + continue + } + refs = append(refs, ref) + } + if len(refs) > 0 { + chunkRefs = append(chunkRefs, &GroupedChunkRefs{ + Fingerprint: chunkRef.Fingerprint, + Tenant: chunkRef.Tenant, + Refs: refs, + }) + } + } + + clone := *m + clone.From = model.TimeFromUnixNano(start.UnixNano()) + clone.Through = model.TimeFromUnixNano(end.UnixNano()) + clone.Refs = chunkRefs + + return &clone +} diff --git a/pkg/logproto/compat_test.go b/pkg/logproto/compat_test.go index 84afa501b68dd..2547c12de968f 100644 --- a/pkg/logproto/compat_test.go +++ b/pkg/logproto/compat_test.go @@ -278,6 +278,74 @@ func TestMergeSeriesResponses(t *testing.T) { } } +func TestFilterChunkRefRequestGetQuery(t *testing.T) { + for _, tc := range []struct { + desc string + request FilterChunkRefRequest + expected string + }{ + { + desc: "empty request", + expected: `0`, + }, + { + desc: "request no filters", + request: FilterChunkRefRequest{ + Refs: []*GroupedChunkRefs{ + { + Fingerprint: 1, + Tenant: "test", + }, + }, + }, + expected: `9962287286179718960`, + }, + { + desc: "request with filters but no chunks", + request: FilterChunkRefRequest{ + Filters: []*LineFilterExpression{ + { + Operator: 0, + Match: "uuid", + }, + }, + }, + expected: `0/0-uuid`, + }, + { + desc: "request with filters and chunks", + request: FilterChunkRefRequest{ + Refs: []*GroupedChunkRefs{ + { + Fingerprint: 1, + Tenant: "test", + }, + { + Fingerprint: 2, + Tenant: "test", + }, + }, + Filters: []*LineFilterExpression{ + { + Operator: 0, + Match: "uuid", + }, + { + Operator: 1, + Match: "trace", + }, + }, + }, + expected: `8827404902424034886/0-uuid,1-trace`, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + actual := tc.request.GetQuery() + require.Equal(t, tc.expected, actual) + }) + } +} + func benchmarkMergeLabelResponses(b *testing.B, responses []*LabelResponse) { b.ReportAllocs() for n := 0; n < b.N; n++ { diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index 6aa905ab98a82..f339745f5c149 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -16,6 +16,7 @@ import ( stats "github.com/grafana/loki/pkg/logqlmodel/stats" _ "github.com/grafana/loki/pkg/push" github_com_grafana_loki_pkg_push "github.com/grafana/loki/pkg/push" + 
github_com_grafana_loki_pkg_querier_plan "github.com/grafana/loki/pkg/querier/plan" github_com_prometheus_common_model "github.com/prometheus/common/model" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -216,13 +217,14 @@ func (m *StreamRate) GetPushes() uint32 { } type QueryRequest struct { - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"` - End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"` - Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` - Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"` - Deletes []*Delete `protobuf:"bytes,8,rep,name=deletes,proto3" json:"deletes,omitempty"` + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Deprecated: Do not use. + Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"` + Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` + Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"` + Deletes []*Delete `protobuf:"bytes,8,rep,name=deletes,proto3" json:"deletes,omitempty"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,9,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *QueryRequest) Reset() { *m = QueryRequest{} } @@ -257,6 +259,7 @@ func (m *QueryRequest) XXX_DiscardUnknown() { var xxx_messageInfo_QueryRequest proto.InternalMessageInfo +// Deprecated: Do not use. func (m *QueryRequest) GetSelector() string { if m != nil { return m.Selector @@ -307,11 +310,12 @@ func (m *QueryRequest) GetDeletes() []*Delete { } type SampleQueryRequest struct { - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` - End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` - Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"` - Deletes []*Delete `protobuf:"bytes,5,rep,name=deletes,proto3" json:"deletes,omitempty"` + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Deprecated: Do not use. + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` + Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"` + Deletes []*Delete `protobuf:"bytes,5,rep,name=deletes,proto3" json:"deletes,omitempty"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,6,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *SampleQueryRequest) Reset() { *m = SampleQueryRequest{} } @@ -346,6 +350,7 @@ func (m *SampleQueryRequest) XXX_DiscardUnknown() { var xxx_messageInfo_SampleQueryRequest proto.InternalMessageInfo +// Deprecated: Do not use. 
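+// Selector stays on the wire for compatibility with older clients, but new
+// code should read the query from the attached Plan instead. A hedged sketch
+// of the intended precedence (illustrative helper, not generated code):
+//
+//	func selectorFor(req *SampleQueryRequest) string {
+//		if req.Plan != nil && req.Plan.AST != nil {
+//			return req.Plan.AST.String() // canonical form of the parsed query
+//		}
+//		return req.Selector // legacy fallback
+//	}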
func (m *SampleQueryRequest) GetSelector() string { if m != nil { return m.Selector @@ -381,6 +386,49 @@ func (m *SampleQueryRequest) GetDeletes() []*Delete { return nil } +type Plan struct { + Raw []byte `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (m *Plan) Reset() { *m = Plan{} } +func (*Plan) ProtoMessage() {} +func (*Plan) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{5} +} +func (m *Plan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Plan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Plan) XXX_Merge(src proto.Message) { + xxx_messageInfo_Plan.Merge(m, src) +} +func (m *Plan) XXX_Size() int { + return m.Size() +} +func (m *Plan) XXX_DiscardUnknown() { + xxx_messageInfo_Plan.DiscardUnknown(m) +} + +var xxx_messageInfo_Plan proto.InternalMessageInfo + +func (m *Plan) GetRaw() []byte { + if m != nil { + return m.Raw + } + return nil +} + type Delete struct { Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` @@ -390,7 +438,7 @@ type Delete struct { func (m *Delete) Reset() { *m = Delete{} } func (*Delete) ProtoMessage() {} func (*Delete) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{5} + return fileDescriptor_c28a5f14f1f4c79a, []int{6} } func (m *Delete) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -448,7 +496,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{6} + return fileDescriptor_c28a5f14f1f4c79a, []int{7} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -492,7 +540,7 @@ type SampleQueryResponse struct { func (m *SampleQueryResponse) Reset() { *m = SampleQueryResponse{} } func (*SampleQueryResponse) ProtoMessage() {} func (*SampleQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{7} + return fileDescriptor_c28a5f14f1f4c79a, []int{8} } func (m *SampleQueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -539,7 +587,7 @@ type LabelRequest struct { func (m *LabelRequest) Reset() { *m = LabelRequest{} } func (*LabelRequest) ProtoMessage() {} func (*LabelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{8} + return fileDescriptor_c28a5f14f1f4c79a, []int{9} } func (m *LabelRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +658,7 @@ type LabelResponse struct { func (m *LabelResponse) Reset() { *m = LabelResponse{} } func (*LabelResponse) ProtoMessage() {} func (*LabelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{9} + return fileDescriptor_c28a5f14f1f4c79a, []int{10} } func (m *LabelResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -655,7 +703,7 @@ type Sample struct { func (m *Sample) Reset() { *m = Sample{} } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{10} + return fileDescriptor_c28a5f14f1f4c79a, []int{11} } func (m *Sample) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -714,7 +762,7 @@ type LegacySample struct { func (m *LegacySample) Reset() { *m = LegacySample{} } func (*LegacySample) ProtoMessage() {} func (*LegacySample) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{11} + return fileDescriptor_c28a5f14f1f4c79a, []int{12} } func (m *LegacySample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +814,7 @@ type Series struct { func (m *Series) Reset() { *m = Series{} } func (*Series) ProtoMessage() {} func (*Series) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{12} + return fileDescriptor_c28a5f14f1f4c79a, []int{13} } func (m *Series) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -817,16 +865,17 @@ func (m *Series) GetStreamHash() uint64 { } type TailRequest struct { - Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"` - Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` - Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"` + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` // Deprecated: Do not use. + DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"` + Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"` + Plan *github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,6,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` } func (m *TailRequest) Reset() { *m = TailRequest{} } func (*TailRequest) ProtoMessage() {} func (*TailRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{13} + return fileDescriptor_c28a5f14f1f4c79a, []int{14} } func (m *TailRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -855,6 +904,7 @@ func (m *TailRequest) XXX_DiscardUnknown() { var xxx_messageInfo_TailRequest proto.InternalMessageInfo +// Deprecated: Do not use. 
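+// Note on the long run of Descriptor() changes below: inserting the new Plan
+// message at file-descriptor index 5 shifts every later message's index up
+// by one, so those edits are mechanical renumbering with no behavioral
+// change. As with Selector above, TailRequest.Query remains on the wire but
+// is deprecated in favor of the attached Plan.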
func (m *TailRequest) GetQuery() string { if m != nil { return m.Query @@ -891,7 +941,7 @@ type TailResponse struct { func (m *TailResponse) Reset() { *m = TailResponse{} } func (*TailResponse) ProtoMessage() {} func (*TailResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{14} + return fileDescriptor_c28a5f14f1f4c79a, []int{15} } func (m *TailResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -937,7 +987,7 @@ type SeriesRequest struct { func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } func (*SeriesRequest) ProtoMessage() {} func (*SeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{15} + return fileDescriptor_c28a5f14f1f4c79a, []int{16} } func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1001,7 +1051,7 @@ type SeriesResponse struct { func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } func (*SeriesResponse) ProtoMessage() {} func (*SeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{16} + return fileDescriptor_c28a5f14f1f4c79a, []int{17} } func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1044,7 +1094,7 @@ type SeriesIdentifier struct { func (m *SeriesIdentifier) Reset() { *m = SeriesIdentifier{} } func (*SeriesIdentifier) ProtoMessage() {} func (*SeriesIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{17} + return fileDescriptor_c28a5f14f1f4c79a, []int{18} } func (m *SeriesIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1089,7 +1139,7 @@ type DroppedStream struct { func (m *DroppedStream) Reset() { *m = DroppedStream{} } func (*DroppedStream) ProtoMessage() {} func (*DroppedStream) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{18} + return fileDescriptor_c28a5f14f1f4c79a, []int{19} } func (m *DroppedStream) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1147,7 +1197,7 @@ type LabelPair struct { func (m *LabelPair) Reset() { *m = LabelPair{} } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{19} + return fileDescriptor_c28a5f14f1f4c79a, []int{20} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1250,7 @@ type LegacyLabelPair struct { func (m *LegacyLabelPair) Reset() { *m = LegacyLabelPair{} } func (*LegacyLabelPair) ProtoMessage() {} func (*LegacyLabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{20} + return fileDescriptor_c28a5f14f1f4c79a, []int{21} } func (m *LegacyLabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1250,7 +1300,7 @@ type Chunk struct { func (m *Chunk) Reset() { *m = Chunk{} } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{21} + return fileDescriptor_c28a5f14f1f4c79a, []int{22} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1292,7 +1342,7 @@ type TailersCountRequest struct { func (m *TailersCountRequest) Reset() { *m = TailersCountRequest{} } func (*TailersCountRequest) ProtoMessage() {} func (*TailersCountRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{22} + return fileDescriptor_c28a5f14f1f4c79a, []int{23} } func (m *TailersCountRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1328,7 +1378,7 @@ type 
TailersCountResponse struct { func (m *TailersCountResponse) Reset() { *m = TailersCountResponse{} } func (*TailersCountResponse) ProtoMessage() {} func (*TailersCountResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{23} + return fileDescriptor_c28a5f14f1f4c79a, []int{24} } func (m *TailersCountResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1373,7 +1423,7 @@ type GetChunkIDsRequest struct { func (m *GetChunkIDsRequest) Reset() { *m = GetChunkIDsRequest{} } func (*GetChunkIDsRequest) ProtoMessage() {} func (*GetChunkIDsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{24} + return fileDescriptor_c28a5f14f1f4c79a, []int{25} } func (m *GetChunkIDsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1430,7 +1480,7 @@ type GetChunkIDsResponse struct { func (m *GetChunkIDsResponse) Reset() { *m = GetChunkIDsResponse{} } func (*GetChunkIDsResponse) ProtoMessage() {} func (*GetChunkIDsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{25} + return fileDescriptor_c28a5f14f1f4c79a, []int{26} } func (m *GetChunkIDsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1483,7 +1533,7 @@ type ChunkRef struct { func (m *ChunkRef) Reset() { *m = ChunkRef{} } func (*ChunkRef) ProtoMessage() {} func (*ChunkRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{26} + return fileDescriptor_c28a5f14f1f4c79a, []int{27} } func (m *ChunkRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1544,7 +1594,7 @@ type LabelValuesForMetricNameRequest struct { func (m *LabelValuesForMetricNameRequest) Reset() { *m = LabelValuesForMetricNameRequest{} } func (*LabelValuesForMetricNameRequest) ProtoMessage() {} func (*LabelValuesForMetricNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{27} + return fileDescriptor_c28a5f14f1f4c79a, []int{28} } func (m *LabelValuesForMetricNameRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1603,7 +1653,7 @@ type LabelNamesForMetricNameRequest struct { func (m *LabelNamesForMetricNameRequest) Reset() { *m = LabelNamesForMetricNameRequest{} } func (*LabelNamesForMetricNameRequest) ProtoMessage() {} func (*LabelNamesForMetricNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{28} + return fileDescriptor_c28a5f14f1f4c79a, []int{29} } func (m *LabelNamesForMetricNameRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1647,7 +1697,7 @@ type LineFilterExpression struct { func (m *LineFilterExpression) Reset() { *m = LineFilterExpression{} } func (*LineFilterExpression) ProtoMessage() {} func (*LineFilterExpression) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{29} + return fileDescriptor_c28a5f14f1f4c79a, []int{30} } func (m *LineFilterExpression) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1700,7 +1750,7 @@ type GetChunkRefRequest struct { func (m *GetChunkRefRequest) Reset() { *m = GetChunkRefRequest{} } func (*GetChunkRefRequest) ProtoMessage() {} func (*GetChunkRefRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{30} + return fileDescriptor_c28a5f14f1f4c79a, []int{31} } func (m *GetChunkRefRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1750,7 +1800,7 @@ type GetChunkRefResponse struct { func (m *GetChunkRefResponse) Reset() { *m = GetChunkRefResponse{} } func 
(*GetChunkRefResponse) ProtoMessage() {} func (*GetChunkRefResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{31} + return fileDescriptor_c28a5f14f1f4c79a, []int{32} } func (m *GetChunkRefResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1795,7 +1845,7 @@ type GetSeriesRequest struct { func (m *GetSeriesRequest) Reset() { *m = GetSeriesRequest{} } func (*GetSeriesRequest) ProtoMessage() {} func (*GetSeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{32} + return fileDescriptor_c28a5f14f1f4c79a, []int{33} } func (m *GetSeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1838,7 +1888,7 @@ type GetSeriesResponse struct { func (m *GetSeriesResponse) Reset() { *m = GetSeriesResponse{} } func (*GetSeriesResponse) ProtoMessage() {} func (*GetSeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{33} + return fileDescriptor_c28a5f14f1f4c79a, []int{34} } func (m *GetSeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1882,7 +1932,7 @@ type IndexSeries struct { func (m *IndexSeries) Reset() { *m = IndexSeries{} } func (*IndexSeries) ProtoMessage() {} func (*IndexSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{34} + return fileDescriptor_c28a5f14f1f4c79a, []int{35} } func (m *IndexSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1919,7 +1969,7 @@ type QueryIndexResponse struct { func (m *QueryIndexResponse) Reset() { *m = QueryIndexResponse{} } func (*QueryIndexResponse) ProtoMessage() {} func (*QueryIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{35} + return fileDescriptor_c28a5f14f1f4c79a, []int{36} } func (m *QueryIndexResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1970,7 +2020,7 @@ type Row struct { func (m *Row) Reset() { *m = Row{} } func (*Row) ProtoMessage() {} func (*Row) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{36} + return fileDescriptor_c28a5f14f1f4c79a, []int{37} } func (m *Row) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2020,7 +2070,7 @@ type QueryIndexRequest struct { func (m *QueryIndexRequest) Reset() { *m = QueryIndexRequest{} } func (*QueryIndexRequest) ProtoMessage() {} func (*QueryIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{37} + return fileDescriptor_c28a5f14f1f4c79a, []int{38} } func (m *QueryIndexRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2067,7 +2117,7 @@ type IndexQuery struct { func (m *IndexQuery) Reset() { *m = IndexQuery{} } func (*IndexQuery) ProtoMessage() {} func (*IndexQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{38} + return fileDescriptor_c28a5f14f1f4c79a, []int{39} } func (m *IndexQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2140,7 +2190,7 @@ type IndexStatsRequest struct { func (m *IndexStatsRequest) Reset() { *m = IndexStatsRequest{} } func (*IndexStatsRequest) ProtoMessage() {} func (*IndexStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{39} + return fileDescriptor_c28a5f14f1f4c79a, []int{40} } func (m *IndexStatsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2186,7 +2236,7 @@ type IndexStatsResponse struct { func (m *IndexStatsResponse) Reset() { *m = IndexStatsResponse{} } func (*IndexStatsResponse) 
ProtoMessage() {} func (*IndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{40} + return fileDescriptor_c28a5f14f1f4c79a, []int{41} } func (m *IndexStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2256,7 +2306,7 @@ type VolumeRequest struct { func (m *VolumeRequest) Reset() { *m = VolumeRequest{} } func (*VolumeRequest) ProtoMessage() {} func (*VolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{41} + return fileDescriptor_c28a5f14f1f4c79a, []int{42} } func (m *VolumeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2328,7 +2378,7 @@ type VolumeResponse struct { func (m *VolumeResponse) Reset() { *m = VolumeResponse{} } func (*VolumeResponse) ProtoMessage() {} func (*VolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{42} + return fileDescriptor_c28a5f14f1f4c79a, []int{43} } func (m *VolumeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2379,7 +2429,7 @@ type Volume struct { func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_c28a5f14f1f4c79a, []int{43} + return fileDescriptor_c28a5f14f1f4c79a, []int{44} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2429,6 +2479,7 @@ func init() { proto.RegisterType((*StreamRate)(nil), "logproto.StreamRate") proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest") proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest") + proto.RegisterType((*Plan)(nil), "logproto.Plan") proto.RegisterType((*Delete)(nil), "logproto.Delete") proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse") proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse") @@ -2474,145 +2525,150 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 2202 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x8f, 0x1b, 0x49, - 0xd5, 0x6d, 0xb7, 0xbf, 0x9e, 0x3d, 0x93, 0x49, 0x8d, 0x37, 0xb1, 0x9c, 0xc4, 0x9e, 0x94, 0x96, - 0xec, 0x28, 0x9b, 0xb5, 0x37, 0xb3, 0xb0, 0x64, 0x13, 0x16, 0x88, 0x67, 0xf2, 0x31, 0xc9, 0xe4, - 0x83, 0x9a, 0x10, 0xd0, 0x0a, 0x14, 0xf5, 0xd8, 0x65, 0xbb, 0x15, 0xb7, 0xdb, 0xe9, 0x2e, 0x6f, - 0x32, 0x12, 0x07, 0xfe, 0xc0, 0x4a, 0x7b, 0x43, 0x5c, 0x10, 0x07, 0x24, 0x90, 0x10, 0x17, 0x7e, - 0x00, 0x5c, 0x90, 0x08, 0xb7, 0x70, 0x5b, 0x71, 0x30, 0x64, 0x72, 0x41, 0x73, 0xda, 0x1b, 0x12, - 0x07, 0x84, 0xea, 0xab, 0xbb, 0xec, 0xf1, 0xec, 0xae, 0x43, 0x24, 0x94, 0x8b, 0xbb, 0xde, 0xab, - 0x57, 0xaf, 0xde, 0x77, 0xd5, 0x2b, 0xc3, 0x89, 0xe1, 0xc3, 0x6e, 0xa3, 0xef, 0x77, 0x87, 0x81, - 0xcf, 0xfc, 0x68, 0x50, 0x17, 0xbf, 0x28, 0xa7, 0xe1, 0x4a, 0xa9, 0xeb, 0x77, 0x7d, 0x49, 0xc3, - 0x47, 0x72, 0xbe, 0x52, 0xeb, 0xfa, 0x7e, 0xb7, 0x4f, 0x1b, 0x02, 0xda, 0x19, 0x75, 0x1a, 0xcc, - 0xf5, 0x68, 0xc8, 0x1c, 0x6f, 0xa8, 0x08, 0x56, 0x14, 0xf7, 0x47, 0x7d, 0xcf, 0x6f, 0xd3, 0x7e, - 0x23, 0x64, 0x0e, 0x0b, 0xe5, 0xaf, 0xa2, 0x58, 0xe6, 0x14, 0xc3, 0x51, 0xd8, 0x13, 0x3f, 0x12, - 0x89, 0x4b, 0x80, 0xb6, 0x59, 0x40, 0x1d, 0x8f, 0x38, 0x8c, 0x86, 0x84, 0x3e, 0x1a, 0xd1, 0x90, - 0xe1, 0x5b, 0xb0, 0x3c, 0x81, 0x0d, 0x87, 0xfe, 0x20, 0xa4, 0xe8, 0x7d, 0x28, 0x84, 0x31, 0xba, - 0x6c, 0xad, 0xa4, 0x56, 0x0b, 0x6b, 0xa5, 0x7a, 0xa4, 0x4a, 0xbc, 
0x86, 0x98, 0x84, 0xf8, 0x17, - 0x16, 0x40, 0x3c, 0x87, 0xaa, 0x00, 0x72, 0xf6, 0xba, 0x13, 0xf6, 0xca, 0xd6, 0x8a, 0xb5, 0x6a, - 0x13, 0x03, 0x83, 0xce, 0xc1, 0xd1, 0x18, 0xba, 0xed, 0x6f, 0xf7, 0x9c, 0xa0, 0x5d, 0x4e, 0x0a, - 0xb2, 0x83, 0x13, 0x08, 0x81, 0x1d, 0x38, 0x8c, 0x96, 0x53, 0x2b, 0xd6, 0x6a, 0x8a, 0x88, 0x31, - 0x3a, 0x06, 0x19, 0x46, 0x07, 0xce, 0x80, 0x95, 0xed, 0x15, 0x6b, 0x35, 0x4f, 0x14, 0xc4, 0xf1, - 0x5c, 0x77, 0x1a, 0x96, 0xd3, 0x2b, 0xd6, 0xea, 0x02, 0x51, 0x10, 0xfe, 0x73, 0x12, 0x8a, 0xdf, - 0x1b, 0xd1, 0x60, 0x57, 0x19, 0x00, 0x55, 0x20, 0x17, 0xd2, 0x3e, 0x6d, 0x31, 0x3f, 0x10, 0x02, - 0xe6, 0x49, 0x04, 0xa3, 0x12, 0xa4, 0xfb, 0xae, 0xe7, 0x32, 0x21, 0xd2, 0x02, 0x91, 0x00, 0xba, - 0x08, 0xe9, 0x90, 0x39, 0x01, 0x13, 0x72, 0x14, 0xd6, 0x2a, 0x75, 0xe9, 0xb0, 0xba, 0x76, 0x58, - 0xfd, 0x9e, 0x76, 0x58, 0x33, 0xf7, 0x74, 0x5c, 0x4b, 0x7c, 0xfa, 0xf7, 0x9a, 0x45, 0xe4, 0x12, - 0xf4, 0x3e, 0xa4, 0xe8, 0xa0, 0x2d, 0x64, 0xfd, 0xaa, 0x2b, 0xf9, 0x02, 0x74, 0x1e, 0xf2, 0x6d, - 0x37, 0xa0, 0x2d, 0xe6, 0xfa, 0x03, 0xa1, 0xd1, 0xe2, 0xda, 0x72, 0xec, 0x8d, 0x0d, 0x3d, 0x45, - 0x62, 0x2a, 0x74, 0x0e, 0x32, 0x21, 0x37, 0x5b, 0x58, 0xce, 0xae, 0xa4, 0x56, 0xf3, 0xcd, 0xd2, - 0xfe, 0xb8, 0xb6, 0x24, 0x31, 0xe7, 0x7c, 0xcf, 0x65, 0xd4, 0x1b, 0xb2, 0x5d, 0xa2, 0x68, 0xd0, - 0x59, 0xc8, 0xb6, 0x69, 0x9f, 0x72, 0x67, 0xe7, 0x84, 0xb3, 0x97, 0x0c, 0xf6, 0x62, 0x82, 0x68, - 0x82, 0x1b, 0x76, 0x2e, 0xb3, 0x94, 0xc5, 0xff, 0xb1, 0x00, 0x6d, 0x3b, 0xde, 0xb0, 0x4f, 0xbf, - 0xb2, 0x3d, 0x23, 0xcb, 0x25, 0x5f, 0xda, 0x72, 0xa9, 0x79, 0x2d, 0x17, 0x9b, 0xc1, 0x9e, 0xcf, - 0x0c, 0xe9, 0x2f, 0x31, 0x03, 0xde, 0x82, 0x8c, 0x44, 0x7d, 0x59, 0x0c, 0xc5, 0x3a, 0xa7, 0xb4, - 0x36, 0x4b, 0xb1, 0x36, 0x29, 0x21, 0x27, 0xfe, 0xa5, 0x05, 0x0b, 0xca, 0x90, 0x2a, 0x07, 0x77, - 0x20, 0x2b, 0x73, 0x40, 0xe7, 0xdf, 0xf1, 0xe9, 0xfc, 0xbb, 0xdc, 0x76, 0x86, 0x8c, 0x06, 0xcd, - 0xc6, 0xd3, 0x71, 0xcd, 0xfa, 0xdb, 0xb8, 0xf6, 0x56, 0xd7, 0x65, 0xbd, 0xd1, 0x4e, 0xbd, 0xe5, - 0x7b, 0x8d, 0x6e, 0xe0, 0x74, 0x9c, 0x81, 0xd3, 0xe8, 0xfb, 0x0f, 0xdd, 0x86, 0xae, 0x07, 0x3a, - 0x6f, 0x35, 0x63, 0xf4, 0xb6, 0x90, 0x8e, 0x85, 0xca, 0x23, 0x47, 0xea, 0xb2, 0x8c, 0x6c, 0x0e, - 0xba, 0x34, 0xe4, 0x9c, 0x6d, 0x6e, 0x4c, 0x22, 0x69, 0xf0, 0x4f, 0x60, 0x79, 0xc2, 0xe1, 0x4a, - 0xce, 0x0b, 0x90, 0x09, 0x69, 0xe0, 0x46, 0x65, 0xc2, 0x30, 0xd9, 0xb6, 0xc0, 0x37, 0x17, 0x95, - 0x7c, 0x19, 0x09, 0x13, 0x45, 0x3f, 0xdf, 0xee, 0x7f, 0xb2, 0xa0, 0xb8, 0xe5, 0xec, 0xd0, 0xbe, - 0x8e, 0x34, 0x04, 0xf6, 0xc0, 0xf1, 0xa8, 0xb2, 0xb8, 0x18, 0xf3, 0xb4, 0xff, 0xd8, 0xe9, 0x8f, - 0xa8, 0x64, 0x99, 0x23, 0x0a, 0x9a, 0x37, 0x67, 0xad, 0x97, 0xce, 0x59, 0x2b, 0x8e, 0xbc, 0x12, - 0xa4, 0x1f, 0x71, 0x43, 0x89, 0x7c, 0xcd, 0x13, 0x09, 0xe0, 0xb7, 0x60, 0x41, 0x69, 0xa1, 0xcc, - 0x17, 0x8b, 0xcc, 0xcd, 0x97, 0xd7, 0x22, 0x63, 0x0f, 0x32, 0xd2, 0xda, 0xe8, 0x4d, 0xc8, 0x47, - 0x67, 0x80, 0xd0, 0x36, 0xd5, 0xcc, 0xec, 0x8f, 0x6b, 0x49, 0x16, 0x92, 0x78, 0x02, 0xd5, 0x20, - 0x2d, 0x56, 0x0a, 0xcd, 0xad, 0x66, 0x7e, 0x7f, 0x5c, 0x93, 0x08, 0x22, 0x3f, 0xe8, 0x24, 0xd8, - 0x3d, 0x5e, 0x86, 0xb9, 0x09, 0xec, 0x66, 0x6e, 0x7f, 0x5c, 0x13, 0x30, 0x11, 0xbf, 0xf8, 0x1a, - 0x14, 0xb7, 0x68, 0xd7, 0x69, 0xed, 0xaa, 0x4d, 0x4b, 0x9a, 0x1d, 0xdf, 0xd0, 0xd2, 0x3c, 0x4e, - 0x43, 0x31, 0xda, 0xf1, 0x81, 0x17, 0xaa, 0xa0, 0x2e, 0x44, 0xb8, 0x5b, 0x21, 0xfe, 0xb9, 0x05, - 0xca, 0xcf, 0x08, 0x43, 0xa6, 0xcf, 0x75, 0x0d, 0xa5, 0x8f, 0x9a, 0xb0, 0x3f, 0xae, 0x29, 0x0c, - 0x51, 0x5f, 0x74, 0x09, 0xb2, 0xa1, 0xd8, 0x91, 0x33, 0x9b, 0x0e, 0x1f, 0x31, 0xd1, 0x3c, 
0xc2, - 0xc3, 0x60, 0x7f, 0x5c, 0xd3, 0x84, 0x44, 0x0f, 0x50, 0x7d, 0xe2, 0x7c, 0x91, 0x8a, 0x2d, 0xee, - 0x8f, 0x6b, 0x06, 0xd6, 0x3c, 0x6f, 0xf0, 0xcf, 0x2c, 0x28, 0xdc, 0x73, 0xdc, 0x28, 0x84, 0x22, - 0x17, 0x59, 0x86, 0x8b, 0x78, 0x3a, 0xb7, 0x69, 0xdf, 0xd9, 0xbd, 0xea, 0x07, 0x82, 0xe7, 0x02, - 0x89, 0xe0, 0xf8, 0x48, 0xb0, 0x67, 0x1e, 0x09, 0xe9, 0xb9, 0x0b, 0xdb, 0x0d, 0x3b, 0x97, 0x5c, - 0x4a, 0xe1, 0xdf, 0x59, 0x50, 0x94, 0x92, 0xa9, 0xb0, 0xf8, 0x11, 0x64, 0xa4, 0xe0, 0x42, 0xb6, - 0x2f, 0x48, 0xfe, 0xb7, 0xe7, 0x49, 0x7c, 0xc5, 0x13, 0x7d, 0x07, 0x16, 0xdb, 0x81, 0x3f, 0x1c, - 0xd2, 0xf6, 0xb6, 0x2a, 0x31, 0xc9, 0xe9, 0x12, 0xb3, 0x61, 0xce, 0x93, 0x29, 0x72, 0xfc, 0x17, - 0x0b, 0x16, 0x54, 0x36, 0x2b, 0x5b, 0x46, 0x36, 0xb0, 0x5e, 0xba, 0xb8, 0x27, 0xe7, 0x2d, 0xee, - 0xc7, 0x20, 0xd3, 0x0d, 0xfc, 0xd1, 0x30, 0x2c, 0xa7, 0x64, 0xee, 0x48, 0x68, 0xbe, 0xa2, 0x8f, - 0x6f, 0xc0, 0xa2, 0x56, 0xe5, 0x90, 0x92, 0x56, 0x99, 0x2e, 0x69, 0x9b, 0x6d, 0x3a, 0x60, 0x6e, - 0xc7, 0x8d, 0x8a, 0x94, 0xa2, 0xc7, 0x9f, 0x58, 0xb0, 0x34, 0x4d, 0x82, 0xbe, 0x6d, 0xe4, 0x01, - 0x67, 0x77, 0xe6, 0x70, 0x76, 0x75, 0x51, 0x1c, 0xc2, 0x2b, 0x03, 0x16, 0xec, 0xea, 0x1c, 0xa9, - 0x7c, 0x00, 0x05, 0x03, 0xcd, 0x0f, 0x8f, 0x87, 0x54, 0xc7, 0x2c, 0x1f, 0xc6, 0xc9, 0x9a, 0x94, - 0x71, 0x2c, 0x80, 0x8b, 0xc9, 0x0b, 0x16, 0x8f, 0xf8, 0x85, 0x09, 0x4f, 0xa2, 0x0b, 0x60, 0x77, - 0x02, 0xdf, 0x9b, 0xcb, 0x4d, 0x62, 0x05, 0xfa, 0x3a, 0x24, 0x99, 0x3f, 0x97, 0x93, 0x92, 0xcc, - 0xe7, 0x3e, 0x52, 0xca, 0xa7, 0xe4, 0x0d, 0x4d, 0x42, 0xf8, 0x1b, 0x90, 0x17, 0x4a, 0xdd, 0x75, - 0xdc, 0x60, 0x66, 0x2d, 0x9f, 0xa9, 0x14, 0xbe, 0x04, 0x47, 0x64, 0x9d, 0x9a, 0xbd, 0xb8, 0x38, - 0x6b, 0x71, 0x51, 0x2f, 0x3e, 0x01, 0xe9, 0xf5, 0xde, 0x68, 0xf0, 0x90, 0x2f, 0x69, 0x3b, 0xcc, - 0xd1, 0x4b, 0xf8, 0x18, 0xbf, 0x01, 0xcb, 0x3c, 0x03, 0x69, 0x10, 0xae, 0xfb, 0xa3, 0x01, 0xd3, - 0x37, 0xe4, 0x73, 0x50, 0x9a, 0x44, 0xab, 0x18, 0x29, 0x41, 0xba, 0xc5, 0x11, 0x82, 0xc7, 0x02, - 0x91, 0x00, 0xfe, 0x95, 0x05, 0xe8, 0x1a, 0x65, 0x62, 0x97, 0xcd, 0x8d, 0xd0, 0xb8, 0x15, 0x79, - 0x0e, 0x6b, 0xf5, 0x68, 0x10, 0xea, 0x1b, 0x82, 0x86, 0xff, 0x1f, 0xb7, 0x22, 0x7c, 0x1e, 0x96, - 0x27, 0xa4, 0x54, 0x3a, 0x55, 0x20, 0xd7, 0x52, 0x38, 0x75, 0x1a, 0x45, 0x30, 0xfe, 0x7d, 0x12, - 0x72, 0x62, 0x01, 0xa1, 0x1d, 0x74, 0x1e, 0x0a, 0x1d, 0x77, 0xd0, 0xa5, 0xc1, 0x30, 0x70, 0x95, - 0x09, 0xec, 0xe6, 0x91, 0xfd, 0x71, 0xcd, 0x44, 0x13, 0x13, 0x40, 0xef, 0x40, 0x76, 0x14, 0xd2, - 0xe0, 0x81, 0x2b, 0xf3, 0x3c, 0xdf, 0x2c, 0xed, 0x8d, 0x6b, 0x99, 0xef, 0x87, 0x34, 0xd8, 0xdc, - 0xe0, 0xe7, 0xc2, 0x48, 0x8c, 0x88, 0xfc, 0xb6, 0xd1, 0x4d, 0x15, 0xa6, 0xe2, 0x8a, 0xd4, 0xfc, - 0x26, 0x17, 0x7f, 0xaa, 0xd0, 0x0d, 0x03, 0xdf, 0xa3, 0xac, 0x47, 0x47, 0x61, 0xa3, 0xe5, 0x7b, - 0x9e, 0x3f, 0x68, 0x88, 0x7e, 0x48, 0x28, 0xcd, 0x0f, 0x37, 0xbe, 0x5c, 0x45, 0xee, 0x3d, 0xc8, - 0xb2, 0x5e, 0xe0, 0x8f, 0xba, 0x3d, 0x51, 0xb7, 0x53, 0xcd, 0x8b, 0xf3, 0xf3, 0xd3, 0x1c, 0x88, - 0x1e, 0xa0, 0xd3, 0xdc, 0x5a, 0xb4, 0xf5, 0x30, 0x1c, 0x79, 0xb2, 0xcb, 0x68, 0xa6, 0xf7, 0xc7, - 0x35, 0xeb, 0x1d, 0x12, 0xa1, 0xf1, 0x27, 0x49, 0xa8, 0x89, 0x40, 0xbd, 0x2f, 0x0e, 0xf5, 0xab, - 0x7e, 0x70, 0x8b, 0xb2, 0xc0, 0x6d, 0xdd, 0x76, 0x3c, 0xaa, 0x63, 0xa3, 0x06, 0x05, 0x4f, 0x20, - 0x1f, 0x18, 0x29, 0x00, 0x5e, 0x44, 0x87, 0x4e, 0x01, 0x88, 0x9c, 0x91, 0xf3, 0x32, 0x1b, 0xf2, - 0x02, 0x23, 0xa6, 0xd7, 0x27, 0x2c, 0xd5, 0x98, 0x53, 0x33, 0x65, 0xa1, 0xcd, 0x69, 0x0b, 0xcd, - 0xcd, 0x27, 0x32, 0x8b, 0x19, 0xeb, 0xe9, 0xc9, 0x58, 0xc7, 0x7f, 0xb5, 0xa0, 0xba, 0xa5, 0x25, - 0x7f, 0x49, 0x73, 
0x68, 0x7d, 0x93, 0xaf, 0x48, 0xdf, 0xd4, 0xff, 0xa6, 0x2f, 0xbe, 0x0e, 0xa5, - 0x2d, 0x77, 0x40, 0xaf, 0xba, 0x7d, 0x46, 0x83, 0x2b, 0x4f, 0x86, 0x01, 0x0d, 0x43, 0xde, 0x80, - 0x55, 0x20, 0xe7, 0x0f, 0x69, 0xe0, 0xe8, 0xae, 0x20, 0x45, 0x22, 0x98, 0x17, 0x0f, 0x61, 0x13, - 0x5d, 0xdb, 0x04, 0x80, 0xff, 0x6d, 0x14, 0x0f, 0x42, 0x3b, 0xda, 0x22, 0xeb, 0x46, 0xc5, 0x7e, - 0x15, 0x0a, 0x27, 0x5f, 0xa1, 0x83, 0x53, 0x53, 0xc5, 0xec, 0x02, 0x64, 0x3b, 0xc2, 0x10, 0xf2, - 0xe8, 0x2d, 0xac, 0x55, 0xe3, 0xb3, 0x6e, 0x96, 0x95, 0x88, 0x26, 0xc7, 0x1f, 0xc6, 0x25, 0x49, - 0xe8, 0xae, 0x4a, 0xd2, 0x19, 0xb0, 0x03, 0xda, 0xd1, 0x27, 0x27, 0x8a, 0xb9, 0x45, 0x94, 0x62, - 0x1e, 0xff, 0xc1, 0x82, 0xa5, 0x6b, 0x94, 0x4d, 0xde, 0x49, 0x5e, 0x23, 0xcb, 0xe1, 0xeb, 0x70, - 0xd4, 0x90, 0x5f, 0x69, 0xff, 0xde, 0xd4, 0x45, 0xe4, 0x8d, 0x58, 0xff, 0xcd, 0x41, 0x9b, 0x3e, - 0x51, 0x0d, 0xd6, 0xe4, 0x1d, 0xe4, 0x2e, 0x14, 0x8c, 0x49, 0x74, 0x79, 0xea, 0xf6, 0x61, 0x3c, - 0x1c, 0x44, 0x67, 0x68, 0xb3, 0xa4, 0x74, 0x92, 0x2d, 0x96, 0xba, 0x5b, 0x46, 0x67, 0xf5, 0x36, - 0x20, 0xd1, 0xf3, 0x09, 0xb6, 0xe6, 0x69, 0x21, 0xb0, 0x37, 0xa3, 0xcb, 0x48, 0x04, 0xa3, 0xd3, - 0x60, 0x07, 0xfe, 0x63, 0x7d, 0xad, 0x5c, 0x88, 0xb7, 0x24, 0xfe, 0x63, 0x22, 0xa6, 0xf0, 0x25, - 0x48, 0x11, 0xff, 0x31, 0xaa, 0x02, 0x04, 0xce, 0xa0, 0x4b, 0xef, 0x47, 0xdd, 0x46, 0x91, 0x18, - 0x98, 0x43, 0x4e, 0xf2, 0x75, 0x38, 0x6a, 0x4a, 0x24, 0xdd, 0x5d, 0x87, 0x2c, 0x47, 0xba, 0xb3, - 0x5e, 0xac, 0x04, 0xa1, 0x6c, 0x5c, 0x35, 0x11, 0x8f, 0x19, 0x88, 0xf1, 0xe8, 0x24, 0xe4, 0x99, - 0xb3, 0xd3, 0xa7, 0xb7, 0xe3, 0xba, 0x13, 0x23, 0xf8, 0x2c, 0x6f, 0x94, 0xee, 0x1b, 0x57, 0x92, - 0x18, 0x81, 0xce, 0xc2, 0x52, 0x2c, 0xf3, 0xdd, 0x80, 0x76, 0xdc, 0x27, 0xc2, 0xc3, 0x45, 0x72, - 0x00, 0x8f, 0x56, 0xe1, 0x48, 0x8c, 0xdb, 0x16, 0x47, 0xbf, 0x2d, 0x48, 0xa7, 0xd1, 0xdc, 0x36, - 0x42, 0xdd, 0x2b, 0x8f, 0x46, 0x4e, 0x5f, 0x14, 0xd3, 0x22, 0x31, 0x30, 0xf8, 0x8f, 0x16, 0x1c, - 0x95, 0xae, 0xe6, 0x2d, 0xf2, 0xeb, 0x18, 0xf5, 0xbf, 0xb6, 0x00, 0x99, 0x1a, 0xa8, 0xd0, 0xfa, - 0x9a, 0xf9, 0xf6, 0xc1, 0xef, 0x16, 0x05, 0xd1, 0xff, 0x49, 0x54, 0xfc, 0x7c, 0x81, 0x21, 0x23, - 0xee, 0x27, 0xb2, 0x11, 0xb5, 0x65, 0x83, 0x29, 0x31, 0x44, 0x7d, 0x79, 0x5f, 0xbc, 0xb3, 0xcb, - 0x68, 0xa8, 0xda, 0x43, 0xd1, 0x17, 0x0b, 0x04, 0x91, 0x1f, 0xbe, 0x17, 0x1d, 0x30, 0x11, 0x35, - 0x76, 0xbc, 0x97, 0x42, 0x11, 0x3d, 0xc0, 0xbf, 0x4d, 0xc2, 0xc2, 0x7d, 0xbf, 0x3f, 0x8a, 0x4f, - 0xaa, 0xd7, 0xa9, 0x2e, 0x4f, 0xf4, 0xad, 0x69, 0xdd, 0xb7, 0x22, 0xb0, 0x43, 0x46, 0x87, 0x22, - 0xb2, 0x52, 0x44, 0x8c, 0x11, 0x86, 0x22, 0x73, 0x82, 0x2e, 0x65, 0xb2, 0xe5, 0x28, 0x67, 0xc4, - 0x3d, 0x70, 0x02, 0x87, 0x56, 0xa0, 0xe0, 0x74, 0xbb, 0x01, 0xed, 0x3a, 0x8c, 0x36, 0x77, 0xcb, - 0x59, 0xb1, 0x99, 0x89, 0xc2, 0x3f, 0x84, 0x45, 0x6d, 0x2c, 0xe5, 0xd2, 0x77, 0x21, 0xfb, 0xb1, - 0xc0, 0xcc, 0x78, 0x27, 0x92, 0xa4, 0xaa, 0x8c, 0x69, 0xb2, 0xc9, 0xe7, 0x57, 0x2d, 0x33, 0xbe, - 0x01, 0x19, 0x49, 0x8e, 0x4e, 0x9a, 0x4d, 0x83, 0x7c, 0xd0, 0xe0, 0xb0, 0xea, 0x00, 0x30, 0x64, - 0x24, 0x23, 0xe5, 0x78, 0x11, 0x1b, 0x12, 0x43, 0xd4, 0xf7, 0xec, 0x19, 0xc8, 0x47, 0x6f, 0xa7, - 0xa8, 0x00, 0xd9, 0xab, 0x77, 0xc8, 0x0f, 0x2e, 0x93, 0x8d, 0xa5, 0x04, 0x2a, 0x42, 0xae, 0x79, - 0x79, 0xfd, 0xa6, 0x80, 0xac, 0xb5, 0x7f, 0xd9, 0xba, 0xb2, 0x04, 0xe8, 0x5b, 0x90, 0x96, 0xe5, - 0xe2, 0x58, 0x2c, 0xbf, 0xf9, 0x02, 0x5a, 0x39, 0x7e, 0x00, 0x2f, 0x2d, 0x80, 0x13, 0xef, 0x5a, - 0xe8, 0x36, 0x14, 0x04, 0x52, 0xbd, 0xb2, 0x9c, 0x9c, 0x7e, 0xec, 0x98, 0xe0, 0x74, 0xea, 0x90, - 0x59, 0x83, 0xdf, 0x45, 0x48, 0x0b, 0x9f, 
0x98, 0xd2, 0x98, 0xaf, 0x64, 0xa6, 0x34, 0x13, 0xef, - 0x4e, 0x38, 0x81, 0x3e, 0x00, 0x9b, 0x77, 0x36, 0xc8, 0x38, 0x54, 0x8c, 0xc7, 0x91, 0xca, 0xb1, - 0x69, 0xb4, 0xb1, 0xed, 0x87, 0xd1, 0x1b, 0xcf, 0xf1, 0xe9, 0x5e, 0x56, 0x2f, 0x2f, 0x1f, 0x9c, - 0x88, 0x76, 0xbe, 0x23, 0x1f, 0x3b, 0x74, 0x4f, 0x85, 0x4e, 0x4d, 0x6e, 0x35, 0xd5, 0x82, 0x55, - 0xaa, 0x87, 0x4d, 0x47, 0x0c, 0xb7, 0xa0, 0x60, 0xf4, 0x33, 0xa6, 0x59, 0x0f, 0x36, 0x63, 0xa6, - 0x59, 0x67, 0x34, 0x41, 0x38, 0x81, 0xae, 0x41, 0x8e, 0x1f, 0xc5, 0xbc, 0x22, 0xa1, 0x13, 0xd3, - 0x27, 0xae, 0x51, 0x69, 0x2b, 0x27, 0x67, 0x4f, 0x46, 0x8c, 0xbe, 0x0b, 0xf9, 0x6b, 0x94, 0xa9, - 0x70, 0x3d, 0x3e, 0x1d, 0xef, 0x33, 0x2c, 0x35, 0x99, 0x33, 0x38, 0xb1, 0xf6, 0x63, 0xfd, 0x7f, - 0xca, 0x86, 0xc3, 0x1c, 0x74, 0x07, 0x16, 0x85, 0x60, 0xd1, 0x1f, 0x2e, 0x13, 0x01, 0x74, 0xe0, - 0xdf, 0x9d, 0x89, 0x00, 0x3a, 0xf8, 0x2f, 0x0f, 0x4e, 0x34, 0x3f, 0x7a, 0xf6, 0xbc, 0x9a, 0xf8, - 0xec, 0x79, 0x35, 0xf1, 0xf9, 0xf3, 0xaa, 0xf5, 0xd3, 0xbd, 0xaa, 0xf5, 0x9b, 0xbd, 0xaa, 0xf5, - 0x74, 0xaf, 0x6a, 0x3d, 0xdb, 0xab, 0x5a, 0xff, 0xd8, 0xab, 0x5a, 0xff, 0xdc, 0xab, 0x26, 0x3e, - 0xdf, 0xab, 0x5a, 0x9f, 0xbe, 0xa8, 0x26, 0x9e, 0xbd, 0xa8, 0x26, 0x3e, 0x7b, 0x51, 0x4d, 0x7c, - 0xf4, 0xe6, 0x17, 0x3d, 0x37, 0xe9, 0x1d, 0x77, 0x32, 0xe2, 0xf3, 0xde, 0x7f, 0x03, 0x00, 0x00, - 0xff, 0xff, 0xc7, 0xff, 0x87, 0xf1, 0x0e, 0x1b, 0x00, 0x00, + // 2279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x6f, 0x1b, 0xc7, + 0x95, 0x4b, 0x2e, 0xbf, 0x1e, 0x29, 0x59, 0x1e, 0x31, 0x36, 0x41, 0xdb, 0xa4, 0x3c, 0x48, 0x1d, + 0xc1, 0x71, 0xc8, 0x58, 0x69, 0x5d, 0xc7, 0x6e, 0xda, 0x9a, 0x92, 0x3f, 0x64, 0xcb, 0x1f, 0x19, + 0xb9, 0x6e, 0x61, 0xb4, 0x30, 0x56, 0xe2, 0x88, 0x22, 0xcc, 0xe5, 0xd2, 0xbb, 0xc3, 0xd8, 0x02, + 0x7a, 0xe8, 0x1f, 0x08, 0x90, 0x5b, 0xd1, 0x4b, 0xd1, 0x43, 0x81, 0x14, 0x28, 0x7a, 0xe9, 0x0f, + 0x68, 0x2f, 0x3d, 0xb8, 0x37, 0xf7, 0x16, 0xe4, 0xc0, 0xd6, 0xf2, 0xa5, 0xd0, 0x29, 0xb7, 0x00, + 0x3d, 0x15, 0xf3, 0xb5, 0x3b, 0xbb, 0xa2, 0x8c, 0xd0, 0x75, 0x11, 0xf8, 0xc2, 0x9d, 0xf7, 0xe6, + 0xcd, 0x9b, 0xf7, 0x35, 0xef, 0xcd, 0x1b, 0xc2, 0xb1, 0xe1, 0xc3, 0x6e, 0xab, 0xef, 0x75, 0x87, + 0xbe, 0xc7, 0xbc, 0x70, 0xd0, 0x14, 0xbf, 0xa8, 0xa0, 0xe1, 0x5a, 0xa5, 0xeb, 0x75, 0x3d, 0x49, + 0xc3, 0x47, 0x72, 0xbe, 0xd6, 0xe8, 0x7a, 0x5e, 0xb7, 0x4f, 0x5b, 0x02, 0xda, 0x18, 0x6d, 0xb5, + 0x58, 0xcf, 0xa5, 0x01, 0x73, 0xdc, 0xa1, 0x22, 0x58, 0x50, 0xdc, 0x1f, 0xf5, 0x5d, 0xaf, 0x43, + 0xfb, 0xad, 0x80, 0x39, 0x2c, 0x90, 0xbf, 0x8a, 0x62, 0x9e, 0x53, 0x0c, 0x47, 0xc1, 0xb6, 0xf8, + 0x91, 0x48, 0x5c, 0x01, 0xb4, 0xce, 0x7c, 0xea, 0xb8, 0xc4, 0x61, 0x34, 0x20, 0xf4, 0xd1, 0x88, + 0x06, 0x0c, 0xdf, 0x84, 0xf9, 0x18, 0x36, 0x18, 0x7a, 0x83, 0x80, 0xa2, 0x73, 0x50, 0x0a, 0x22, + 0x74, 0xd5, 0x5a, 0xc8, 0x2c, 0x96, 0x96, 0x2a, 0xcd, 0x50, 0x95, 0x68, 0x0d, 0x31, 0x09, 0xf1, + 0x6f, 0x2d, 0x80, 0x68, 0x0e, 0xd5, 0x01, 0xe4, 0xec, 0x35, 0x27, 0xd8, 0xae, 0x5a, 0x0b, 0xd6, + 0xa2, 0x4d, 0x0c, 0x0c, 0x3a, 0x03, 0x87, 0x23, 0xe8, 0x96, 0xb7, 0xbe, 0xed, 0xf8, 0x9d, 0x6a, + 0x5a, 0x90, 0xed, 0x9f, 0x40, 0x08, 0x6c, 0xdf, 0x61, 0xb4, 0x9a, 0x59, 0xb0, 0x16, 0x33, 0x44, + 0x8c, 0xd1, 0x11, 0xc8, 0x31, 0x3a, 0x70, 0x06, 0xac, 0x6a, 0x2f, 0x58, 0x8b, 0x45, 0xa2, 0x20, + 0x8e, 0xe7, 0xba, 0xd3, 0xa0, 0x9a, 0x5d, 0xb0, 0x16, 0x67, 0x88, 0x82, 0xf0, 0xe7, 0x19, 0x28, + 0x7f, 0x3c, 0xa2, 0xfe, 0x8e, 0x32, 0x00, 0xaa, 0x43, 0x21, 0xa0, 0x7d, 0xba, 0xc9, 0x3c, 0x5f, + 0x08, 0x58, 0x6c, 0xa7, 0xab, 0x16, 0x09, 0x71, 0xa8, 
0x02, 0xd9, 0x7e, 0xcf, 0xed, 0x31, 0x21, + 0xd6, 0x0c, 0x91, 0x00, 0xba, 0x00, 0xd9, 0x80, 0x39, 0x3e, 0x13, 0xb2, 0x94, 0x96, 0x6a, 0x4d, + 0xe9, 0xb4, 0xa6, 0x76, 0x5a, 0xf3, 0xae, 0x76, 0x5a, 0xbb, 0xf0, 0x74, 0xdc, 0x48, 0x7d, 0xf6, + 0xcf, 0x86, 0x45, 0xe4, 0x12, 0x74, 0x0e, 0x32, 0x74, 0xd0, 0x11, 0xf2, 0x7e, 0xd3, 0x95, 0x7c, + 0x01, 0x3a, 0x0b, 0xc5, 0x4e, 0xcf, 0xa7, 0x9b, 0xac, 0xe7, 0x0d, 0x84, 0x56, 0xb3, 0x4b, 0xf3, + 0x91, 0x47, 0x56, 0xf4, 0x14, 0x89, 0xa8, 0xd0, 0x19, 0xc8, 0x05, 0xdc, 0x74, 0x41, 0x35, 0xbf, + 0x90, 0x59, 0x2c, 0xb6, 0x2b, 0x7b, 0xe3, 0xc6, 0x9c, 0xc4, 0x9c, 0xf1, 0xdc, 0x1e, 0xa3, 0xee, + 0x90, 0xed, 0x10, 0x45, 0x83, 0x4e, 0x43, 0xbe, 0x43, 0xfb, 0x94, 0x3b, 0xbc, 0x20, 0x1c, 0x3e, + 0x67, 0xb0, 0x17, 0x13, 0x44, 0x13, 0xa0, 0xfb, 0x60, 0x0f, 0xfb, 0xce, 0xa0, 0x5a, 0x14, 0x5a, + 0xcc, 0x46, 0x84, 0x77, 0xfa, 0xce, 0xa0, 0x7d, 0xee, 0xcb, 0x71, 0x63, 0xa9, 0xdb, 0x63, 0xdb, + 0xa3, 0x8d, 0xe6, 0xa6, 0xe7, 0xb6, 0xba, 0xbe, 0xb3, 0xe5, 0x0c, 0x9c, 0x56, 0xdf, 0x7b, 0xd8, + 0x6b, 0xf1, 0xe0, 0x7c, 0x34, 0xa2, 0x7e, 0x8f, 0xfa, 0x2d, 0xce, 0xa3, 0x29, 0xfc, 0xc1, 0xd7, + 0x11, 0xc1, 0xf3, 0xba, 0x5d, 0xc8, 0xcd, 0xe5, 0xf1, 0x38, 0x0d, 0x68, 0xdd, 0x71, 0x87, 0x7d, + 0x3a, 0x95, 0xbf, 0x42, 0xcf, 0xa4, 0x5f, 0xd9, 0x33, 0x99, 0x69, 0x3d, 0x13, 0x99, 0xd9, 0x9e, + 0xce, 0xcc, 0xd9, 0x6f, 0x6a, 0xe6, 0xdc, 0xeb, 0x37, 0x33, 0xae, 0x82, 0xcd, 0x21, 0x34, 0x07, + 0x19, 0xdf, 0x79, 0x2c, 0x8c, 0x59, 0x26, 0x7c, 0x88, 0xd7, 0x20, 0x27, 0x05, 0x41, 0xb5, 0xa4, + 0xb5, 0xe3, 0x27, 0x23, 0xb2, 0x74, 0x46, 0xdb, 0x70, 0x2e, 0xb2, 0x61, 0x46, 0x58, 0x07, 0xff, + 0xce, 0x82, 0x19, 0xe5, 0x42, 0x95, 0x5d, 0x36, 0x20, 0x2f, 0x4f, 0xb7, 0xce, 0x2c, 0x47, 0x93, + 0x99, 0xe5, 0x52, 0xc7, 0x19, 0x32, 0xea, 0xb7, 0x5b, 0x4f, 0xc7, 0x0d, 0xeb, 0xcb, 0x71, 0xe3, + 0x9d, 0x97, 0x69, 0x29, 0x92, 0x9c, 0xca, 0x3a, 0x9a, 0x31, 0x7a, 0x57, 0x48, 0xc7, 0x02, 0x15, + 0x07, 0x87, 0x9a, 0x32, 0x41, 0xae, 0x0e, 0xba, 0x34, 0xe0, 0x9c, 0x6d, 0xee, 0x42, 0x22, 0x69, + 0xf0, 0x2f, 0x61, 0x3e, 0x16, 0x6a, 0x4a, 0xce, 0xf3, 0x90, 0x0b, 0xb8, 0x01, 0xb5, 0x98, 0x86, + 0xa3, 0xd6, 0x05, 0xbe, 0x3d, 0xab, 0xe4, 0xcb, 0x49, 0x98, 0x28, 0xfa, 0xe9, 0x76, 0xff, 0x9b, + 0x05, 0xe5, 0x35, 0x67, 0x83, 0xf6, 0x75, 0x8c, 0x23, 0xb0, 0x07, 0x8e, 0x4b, 0x95, 0xc5, 0xc5, + 0x98, 0x27, 0xb4, 0x4f, 0x9c, 0xfe, 0x88, 0x4a, 0x96, 0x05, 0xa2, 0xa0, 0x69, 0x33, 0x91, 0xf5, + 0xca, 0x99, 0xc8, 0x8a, 0xe2, 0xbd, 0x02, 0x59, 0x1e, 0x59, 0x3b, 0x22, 0x0b, 0x15, 0x89, 0x04, + 0xf0, 0x3b, 0x30, 0xa3, 0xb4, 0x50, 0xe6, 0x8b, 0x44, 0xe6, 0xe6, 0x2b, 0x6a, 0x91, 0xb1, 0x0b, + 0x39, 0x69, 0x6d, 0xf4, 0x36, 0x14, 0xc3, 0xea, 0x26, 0xb4, 0xcd, 0xb4, 0x73, 0x7b, 0xe3, 0x46, + 0x9a, 0x05, 0x24, 0x9a, 0x40, 0x0d, 0xc8, 0x8a, 0x95, 0x42, 0x73, 0xab, 0x5d, 0xdc, 0x1b, 0x37, + 0x24, 0x82, 0xc8, 0x0f, 0x3a, 0x0e, 0xf6, 0x36, 0x2f, 0x30, 0xdc, 0x04, 0x76, 0xbb, 0xb0, 0x37, + 0x6e, 0x08, 0x98, 0x88, 0x5f, 0x7c, 0x15, 0xca, 0x6b, 0xb4, 0xeb, 0x6c, 0xee, 0xa8, 0x4d, 0x2b, + 0x9a, 0x1d, 0xdf, 0xd0, 0xd2, 0x3c, 0x4e, 0x42, 0x39, 0xdc, 0xf1, 0x81, 0x1b, 0xa8, 0xa0, 0x2e, + 0x85, 0xb8, 0x9b, 0x01, 0xfe, 0x8d, 0x05, 0xca, 0xcf, 0x08, 0x43, 0xae, 0xcf, 0x75, 0x0d, 0x54, + 0x0e, 0x82, 0xbd, 0x71, 0x43, 0x61, 0x88, 0xfa, 0xa2, 0x8b, 0x90, 0x0f, 0xc4, 0x8e, 0x9c, 0x59, + 0x32, 0x7c, 0xc4, 0x44, 0xfb, 0x10, 0x0f, 0x83, 0xbd, 0x71, 0x43, 0x13, 0x12, 0x3d, 0x40, 0xcd, + 0x58, 0xe5, 0x94, 0x8a, 0xcd, 0xee, 0x8d, 0x1b, 0x06, 0xd6, 0xac, 0xa4, 0xf8, 0x6b, 0x0b, 0x4a, + 0x77, 0x9d, 0x5e, 0x18, 0x42, 0x55, 0xed, 0xa2, 0x28, 0x47, 0x4a, 0x04, 0x3f, 
0xd2, 0x1d, 0xda, + 0x77, 0x76, 0xae, 0x78, 0xbe, 0xe0, 0x3b, 0x43, 0x42, 0x38, 0x2a, 0x76, 0xf6, 0xc4, 0x62, 0x97, + 0x9d, 0x3e, 0xa5, 0xfe, 0x1f, 0x13, 0xd8, 0x75, 0xbb, 0x90, 0x9e, 0xcb, 0xe0, 0x3f, 0x59, 0x50, + 0x96, 0x9a, 0xab, 0xb0, 0xfb, 0x39, 0xe4, 0xa4, 0x61, 0x84, 0xee, 0x2f, 0x49, 0x2e, 0xef, 0x4e, + 0x93, 0x58, 0x14, 0x4f, 0xf4, 0x23, 0x98, 0xed, 0xf8, 0xde, 0x70, 0x48, 0x3b, 0xeb, 0x2a, 0x85, + 0xa5, 0x93, 0x29, 0x6c, 0xc5, 0x9c, 0x27, 0x09, 0x72, 0xfc, 0x77, 0x0b, 0x66, 0x54, 0xb6, 0x50, + 0xbe, 0x0a, 0xed, 0x6b, 0xbd, 0x72, 0xc9, 0x4a, 0x4f, 0x5b, 0xb2, 0x8e, 0x40, 0xae, 0xeb, 0x7b, + 0xa3, 0x61, 0x50, 0xcd, 0xc8, 0xb3, 0x29, 0xa1, 0xe9, 0x4a, 0x19, 0xbe, 0x0e, 0xb3, 0x5a, 0x95, + 0x03, 0x52, 0x66, 0x2d, 0x99, 0x32, 0x57, 0x3b, 0x74, 0xc0, 0x7a, 0x5b, 0xbd, 0x30, 0x09, 0x2a, + 0x7a, 0xfc, 0xa9, 0x05, 0x73, 0x49, 0x12, 0xf4, 0x43, 0xe3, 0x9c, 0x71, 0x76, 0xa7, 0x0e, 0x66, + 0xd7, 0x14, 0xc9, 0x27, 0xb8, 0x3c, 0x60, 0xfe, 0x8e, 0x3e, 0x83, 0xb5, 0x0f, 0xa1, 0x64, 0xa0, + 0x79, 0x71, 0x7a, 0x48, 0xd5, 0x99, 0x20, 0x7c, 0x18, 0x25, 0x83, 0xb4, 0x4c, 0x65, 0x02, 0xb8, + 0x90, 0x3e, 0x6f, 0xe1, 0x5f, 0x5b, 0x30, 0x13, 0xf3, 0x24, 0x3a, 0x0f, 0xf6, 0x96, 0xef, 0xb9, + 0x53, 0xb9, 0x49, 0xac, 0x40, 0xdf, 0x85, 0x34, 0xf3, 0xa6, 0x72, 0x52, 0x9a, 0x79, 0xdc, 0x47, + 0x4a, 0xf9, 0x8c, 0xbc, 0xdb, 0x4a, 0x08, 0x7f, 0x0f, 0x8a, 0x42, 0xa9, 0x3b, 0x4e, 0xcf, 0x9f, + 0x58, 0x2b, 0x26, 0x2a, 0x85, 0x2f, 0xc2, 0x21, 0x99, 0x07, 0x27, 0x2f, 0x2e, 0x4f, 0x5a, 0x5c, + 0xd6, 0x8b, 0x8f, 0x41, 0x76, 0x79, 0x7b, 0x34, 0x78, 0xc8, 0x97, 0x74, 0x1c, 0xe6, 0xe8, 0x25, + 0x7c, 0x8c, 0xdf, 0x82, 0x79, 0x7e, 0x02, 0xa9, 0x1f, 0x2c, 0x7b, 0xa3, 0x01, 0xd3, 0xbd, 0xc5, + 0x19, 0xa8, 0xc4, 0xd1, 0x2a, 0x46, 0x2a, 0x90, 0xdd, 0xe4, 0x08, 0xc1, 0x63, 0x86, 0x48, 0x00, + 0xff, 0xde, 0x02, 0x74, 0x95, 0x32, 0xb1, 0xcb, 0xea, 0x4a, 0x78, 0x38, 0x6a, 0x50, 0x70, 0x1d, + 0xb6, 0xb9, 0x4d, 0xfd, 0x40, 0xdf, 0x40, 0x34, 0xfc, 0x6d, 0xdc, 0xf5, 0xf0, 0x59, 0x98, 0x8f, + 0x49, 0xa9, 0x74, 0xaa, 0x41, 0x61, 0x53, 0xe1, 0x54, 0xb5, 0x0b, 0x61, 0xfc, 0xe7, 0x34, 0x14, + 0xc4, 0x02, 0x42, 0xb7, 0xd0, 0x59, 0x28, 0x6d, 0xf5, 0x06, 0x5d, 0xea, 0x0f, 0xfd, 0x9e, 0x32, + 0x81, 0xdd, 0x3e, 0xb4, 0x37, 0x6e, 0x98, 0x68, 0x62, 0x02, 0xe8, 0x3d, 0xc8, 0x8f, 0x02, 0xea, + 0x3f, 0xe8, 0xc9, 0x73, 0x5e, 0x6c, 0x57, 0x76, 0xc7, 0x8d, 0xdc, 0x4f, 0x02, 0xea, 0xaf, 0xae, + 0xf0, 0xba, 0x33, 0x12, 0x23, 0x22, 0xbf, 0x1d, 0x74, 0x43, 0x85, 0xa9, 0xb8, 0x82, 0xb5, 0xbf, + 0xcf, 0xc5, 0x4f, 0x24, 0xba, 0xa1, 0xef, 0xb9, 0x94, 0x6d, 0xd3, 0x51, 0xd0, 0xda, 0xf4, 0x5c, + 0xd7, 0x1b, 0xb4, 0x44, 0x27, 0x29, 0x94, 0xe6, 0xc5, 0x93, 0x2f, 0x57, 0x91, 0x7b, 0x17, 0xf2, + 0x6c, 0xdb, 0xf7, 0x46, 0xdd, 0x6d, 0x51, 0x13, 0x32, 0xed, 0x0b, 0xd3, 0xf3, 0xd3, 0x1c, 0x88, + 0x1e, 0xa0, 0x93, 0xdc, 0x5a, 0x74, 0xf3, 0x61, 0x30, 0x72, 0x65, 0x7f, 0xd6, 0xce, 0xee, 0x8d, + 0x1b, 0xd6, 0x7b, 0x24, 0x44, 0xe3, 0x4f, 0xd3, 0xd0, 0x10, 0x81, 0x7a, 0x4f, 0x5c, 0x1a, 0xae, + 0x78, 0xfe, 0x4d, 0xca, 0xfc, 0xde, 0xe6, 0x2d, 0xc7, 0xa5, 0x3a, 0x36, 0x1a, 0x50, 0x72, 0x05, + 0xf2, 0x81, 0x71, 0x04, 0xc0, 0x0d, 0xe9, 0xd0, 0x09, 0x00, 0x71, 0x66, 0xe4, 0xbc, 0x3c, 0x0d, + 0x45, 0x81, 0x11, 0xd3, 0xcb, 0x31, 0x4b, 0xb5, 0xa6, 0xd4, 0x4c, 0x59, 0x68, 0x35, 0x69, 0xa1, + 0xa9, 0xf9, 0x84, 0x66, 0x31, 0x63, 0x3d, 0x1b, 0x8f, 0x75, 0xfc, 0x0f, 0x0b, 0xea, 0x6b, 0x5a, + 0xf2, 0x57, 0x34, 0x87, 0xd6, 0x37, 0xfd, 0x9a, 0xf4, 0xcd, 0xfc, 0x6f, 0xfa, 0xe2, 0x6b, 0x50, + 0x59, 0xeb, 0x0d, 0xe8, 0x95, 0x5e, 0x9f, 0x51, 0xff, 0xf2, 0x93, 0xa1, 0x4f, 0x83, 0x80, 0xb7, + 0xad, 
0x35, 0x28, 0x78, 0x43, 0xea, 0x3b, 0xba, 0xeb, 0xc8, 0x90, 0x10, 0xe6, 0xc9, 0x43, 0xd8, + 0x44, 0xe7, 0x36, 0x01, 0xe0, 0xff, 0x18, 0xc9, 0x83, 0xd0, 0x2d, 0x6d, 0x91, 0x65, 0x23, 0x63, + 0xbf, 0x0e, 0x85, 0xd3, 0xaf, 0xd1, 0xc1, 0x99, 0x44, 0x32, 0x3b, 0x0f, 0xf9, 0x2d, 0x61, 0x08, + 0x59, 0x7a, 0x4b, 0x4b, 0xf5, 0xa8, 0xd6, 0x4d, 0xb2, 0x12, 0xd1, 0xe4, 0xf8, 0xa3, 0x28, 0x25, + 0x09, 0xdd, 0x55, 0x4a, 0x3a, 0x05, 0xb6, 0x4f, 0xb7, 0x74, 0xe5, 0x44, 0x11, 0xb7, 0x90, 0x52, + 0xcc, 0xe3, 0xbf, 0x58, 0x30, 0x77, 0x95, 0xb2, 0xf8, 0x9d, 0xe4, 0x0d, 0xb2, 0x1c, 0xbe, 0x06, + 0x87, 0x0d, 0xf9, 0x95, 0xf6, 0x1f, 0x24, 0x2e, 0x22, 0x6f, 0x45, 0xfa, 0xaf, 0x0e, 0x3a, 0xf4, + 0x89, 0x6a, 0xe0, 0xe2, 0x77, 0x90, 0x3b, 0x50, 0x32, 0x26, 0xd1, 0xa5, 0xc4, 0xed, 0xc3, 0x78, + 0x6e, 0x09, 0x6b, 0x68, 0xbb, 0xa2, 0x74, 0x92, 0x2d, 0x9c, 0xba, 0x5b, 0x86, 0xb5, 0x7a, 0x1d, + 0x90, 0xb8, 0xb6, 0x0a, 0xb6, 0x66, 0xb5, 0x10, 0xd8, 0x1b, 0xe1, 0x65, 0x24, 0x84, 0xd1, 0x49, + 0xb0, 0x7d, 0xef, 0xb1, 0xbe, 0x56, 0xce, 0x44, 0x5b, 0x12, 0xef, 0x31, 0x11, 0x53, 0xf8, 0x22, + 0x64, 0x88, 0xf7, 0x18, 0xd5, 0x01, 0x7c, 0x67, 0xd0, 0xa5, 0xf7, 0xc2, 0x6e, 0xa6, 0x4c, 0x0c, + 0xcc, 0x01, 0x95, 0x7c, 0x19, 0x0e, 0x9b, 0x12, 0x49, 0x77, 0x37, 0x21, 0xff, 0xf1, 0xc8, 0x34, + 0x57, 0x25, 0x61, 0x2e, 0xd9, 0x18, 0x6b, 0x22, 0x1e, 0x33, 0x10, 0xe1, 0xd1, 0x71, 0x28, 0x32, + 0x67, 0xa3, 0x4f, 0x6f, 0x45, 0x79, 0x27, 0x42, 0xf0, 0x59, 0xde, 0x88, 0xdd, 0x33, 0xae, 0x24, + 0x11, 0x02, 0x9d, 0x86, 0xb9, 0x48, 0xe6, 0x3b, 0x3e, 0xdd, 0xea, 0x3d, 0x11, 0x1e, 0x2e, 0x93, + 0x7d, 0x78, 0xb4, 0x08, 0x87, 0x22, 0xdc, 0xba, 0x28, 0xfd, 0xb6, 0x20, 0x4d, 0xa2, 0xb9, 0x6d, + 0x84, 0xba, 0x97, 0x1f, 0x8d, 0x9c, 0xbe, 0x48, 0xa6, 0x65, 0x62, 0x60, 0xf0, 0x5f, 0x2d, 0x38, + 0x2c, 0x5d, 0xcd, 0x5b, 0xf0, 0x37, 0x31, 0xea, 0x3f, 0xb7, 0x00, 0x99, 0x1a, 0xa8, 0xd0, 0xfa, + 0x8e, 0xf9, 0xb6, 0xc2, 0xef, 0x16, 0x25, 0xd1, 0x5f, 0x4a, 0x54, 0xf4, 0x3c, 0x82, 0x21, 0x27, + 0xee, 0x27, 0xb2, 0xd1, 0xb5, 0x65, 0x03, 0x2b, 0x31, 0x44, 0x7d, 0x79, 0xdf, 0xbd, 0xb1, 0xc3, + 0x68, 0xa0, 0xda, 0x4f, 0xd1, 0x77, 0x0b, 0x04, 0x91, 0x1f, 0xbe, 0x17, 0x1d, 0x30, 0x11, 0x35, + 0x76, 0xb4, 0x97, 0x42, 0x11, 0x3d, 0xc0, 0x7f, 0x4c, 0xc3, 0xcc, 0x3d, 0xaf, 0x3f, 0x8a, 0x2a, + 0xd5, 0x9b, 0x94, 0x97, 0x63, 0x3d, 0x71, 0x56, 0xf7, 0xc4, 0x08, 0xec, 0x80, 0xd1, 0xa1, 0x88, + 0xac, 0x0c, 0x11, 0x63, 0x84, 0xa1, 0xcc, 0x1c, 0xbf, 0x4b, 0x99, 0x6c, 0x39, 0xaa, 0x39, 0x71, + 0x0f, 0x8c, 0xe1, 0xd0, 0x02, 0x94, 0x9c, 0x6e, 0xd7, 0xa7, 0x5d, 0x87, 0xd1, 0xf6, 0x4e, 0x35, + 0x2f, 0x36, 0x33, 0x51, 0xf8, 0x67, 0x30, 0xab, 0x8d, 0xa5, 0x5c, 0xfa, 0x3e, 0xe4, 0x3f, 0x11, + 0x98, 0x09, 0xef, 0x50, 0x92, 0x54, 0xa5, 0x31, 0x4d, 0x16, 0x7f, 0xb4, 0xd6, 0x32, 0xe3, 0xeb, + 0x90, 0x93, 0xe4, 0xe8, 0xb8, 0xd9, 0x34, 0xc8, 0x07, 0x13, 0x0e, 0xab, 0x0e, 0x00, 0x43, 0x4e, + 0x32, 0x52, 0x8e, 0x17, 0xb1, 0x21, 0x31, 0x44, 0x7d, 0x4f, 0x9f, 0x82, 0x62, 0xf8, 0xe2, 0x8c, + 0x4a, 0x90, 0xbf, 0x72, 0x9b, 0xfc, 0xf4, 0x12, 0x59, 0x99, 0x4b, 0xa1, 0x32, 0x14, 0xda, 0x97, + 0x96, 0x6f, 0x08, 0xc8, 0x5a, 0xfa, 0xda, 0xd6, 0x99, 0xc5, 0x47, 0x3f, 0x80, 0xac, 0x4c, 0x17, + 0x47, 0x22, 0xf9, 0xcd, 0xb7, 0xdd, 0xda, 0xd1, 0x7d, 0x78, 0x69, 0x01, 0x9c, 0x7a, 0xdf, 0x42, + 0xb7, 0xa0, 0x24, 0x90, 0xea, 0x15, 0xe7, 0x78, 0xf2, 0x31, 0x25, 0xc6, 0xe9, 0xc4, 0x01, 0xb3, + 0x06, 0xbf, 0x0b, 0x90, 0x15, 0x3e, 0x31, 0xa5, 0x31, 0x5f, 0xe1, 0x4c, 0x69, 0x62, 0xef, 0x5a, + 0x38, 0x85, 0x3e, 0x04, 0x9b, 0x77, 0x36, 0xc8, 0x28, 0x2a, 0xc6, 0xe3, 0x4b, 0xed, 0x48, 0x12, + 0x6d, 0x6c, 0xfb, 0x51, 0xf8, 
0x86, 0x74, 0x34, 0xd9, 0xcb, 0xea, 0xe5, 0xd5, 0xfd, 0x13, 0xe1, + 0xce, 0xb7, 0xe5, 0x63, 0x87, 0xee, 0xa9, 0xd0, 0x89, 0xf8, 0x56, 0x89, 0x16, 0xac, 0x56, 0x3f, + 0x68, 0x3a, 0x64, 0xb8, 0x06, 0x25, 0xa3, 0x9f, 0x31, 0xcd, 0xba, 0xbf, 0x19, 0x33, 0xcd, 0x3a, + 0xa1, 0x09, 0xc2, 0x29, 0x74, 0x15, 0x0a, 0xbc, 0x14, 0xf3, 0x8c, 0x84, 0x8e, 0x25, 0x2b, 0xae, + 0x91, 0x69, 0x6b, 0xc7, 0x27, 0x4f, 0x86, 0x8c, 0x7e, 0x0c, 0xc5, 0xab, 0x94, 0xa9, 0x70, 0x3d, + 0x9a, 0x8c, 0xf7, 0x09, 0x96, 0x8a, 0x9f, 0x19, 0x9c, 0x5a, 0xfa, 0x85, 0xfe, 0x27, 0x6a, 0xc5, + 0x61, 0x0e, 0xba, 0x0d, 0xb3, 0x42, 0xb0, 0xf0, 0xaf, 0xaa, 0x58, 0x00, 0xed, 0xfb, 0x5f, 0x2c, + 0x16, 0x40, 0xfb, 0xff, 0x1f, 0xc3, 0xa9, 0xf6, 0xfd, 0x67, 0xcf, 0xeb, 0xa9, 0x2f, 0x9e, 0xd7, + 0x53, 0x5f, 0x3d, 0xaf, 0x5b, 0xbf, 0xda, 0xad, 0x5b, 0x7f, 0xd8, 0xad, 0x5b, 0x4f, 0x77, 0xeb, + 0xd6, 0xb3, 0xdd, 0xba, 0xf5, 0xaf, 0xdd, 0xba, 0xf5, 0xef, 0xdd, 0x7a, 0xea, 0xab, 0xdd, 0xba, + 0xf5, 0xd9, 0x8b, 0x7a, 0xea, 0xd9, 0x8b, 0x7a, 0xea, 0x8b, 0x17, 0xf5, 0xd4, 0xfd, 0xb7, 0x5f, + 0xf6, 0xdc, 0xa4, 0x77, 0xdc, 0xc8, 0x89, 0xcf, 0x07, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x8b, + 0xe7, 0x73, 0xe7, 0x48, 0x1c, 0x00, 0x00, } func (x Direction) String() string { @@ -2758,6 +2814,13 @@ func (this *QueryRequest) Equal(that interface{}) bool { return false } } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } return true } func (this *SampleQueryRequest) Equal(that interface{}) bool { @@ -2804,6 +2867,37 @@ func (this *SampleQueryRequest) Equal(that interface{}) bool { return false } } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } + return true +} +func (this *Plan) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Plan) + if !ok { + that2, ok := that.(Plan) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Raw, that1.Raw) { + return false + } return true } func (this *Delete) Equal(that interface{}) bool { @@ -3096,6 +3190,13 @@ func (this *TailRequest) Equal(that interface{}) bool { if !this.Start.Equal(that1.Start) { return false } + if that1.Plan == nil { + if this.Plan != nil { + return false + } + } else if !this.Plan.Equal(*that1.Plan) { + return false + } return true } func (this *TailResponse) Equal(that interface{}) bool { @@ -4063,7 +4164,7 @@ func (this *QueryRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 12) s = append(s, "&logproto.QueryRequest{") s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") @@ -4074,6 +4175,7 @@ func (this *QueryRequest) GoString() string { if this.Deletes != nil { s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n") } + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -4081,7 +4183,7 @@ func (this *SampleQueryRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&logproto.SampleQueryRequest{") s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") @@ -4090,6 +4192,17 @@ func (this *SampleQueryRequest) GoString() 
string { if this.Deletes != nil { s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n") } + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Plan) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.Plan{") + s = append(s, "Raw: "+fmt.Sprintf("%#v", this.Raw)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -4196,12 +4309,13 @@ func (this *TailRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&logproto.TailRequest{") s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") s = append(s, "DelayFor: "+fmt.Sprintf("%#v", this.DelayFor)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -5258,6 +5372,18 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if len(m.Deletes) > 0 { for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- { { @@ -5286,21 +5412,21 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintLogproto(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) if err2 != nil { return 0, err2 } i -= n2 i = encodeVarintLogproto(dAtA, i, uint64(n2)) i-- + dAtA[i] = 0x22 + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintLogproto(dAtA, i, uint64(n3)) + i-- dAtA[i] = 0x1a if m.Limit != 0 { i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) @@ -5337,6 +5463,18 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.Deletes) > 0 { for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- { { @@ -5360,20 +5498,20 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err3 != nil { - return 0, err3 + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err5 != nil { + return 0, err5 } - i -= n3 - i = encodeVarintLogproto(dAtA, i, uint64(n3)) + i -= n5 + i = encodeVarintLogproto(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x1a - n4, err4 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err4 != nil { - return 0, err4 + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err6 != nil { + return 0, err6 } - i -= n4 - i = encodeVarintLogproto(dAtA, i, uint64(n4)) + i -= n6 + i = encodeVarintLogproto(dAtA, i, uint64(n6)) i-- dAtA[i] = 0x12 if len(m.Selector) > 0 { @@ -5386,6 +5524,36 @@ func (m *SampleQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Plan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Plan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Raw) > 0 { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Delete) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5548,22 +5716,22 @@ func (m *LabelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x2a } if m.End != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) - if err7 != nil { - return 0, err7 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) + if err9 != nil { + return 0, err9 } - i -= n7 - i = encodeVarintLogproto(dAtA, i, uint64(n7)) + i -= n9 + i = encodeVarintLogproto(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x22 } if m.Start != nil { - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) - if err8 != nil { - return 0, err8 + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) + if err10 != nil { + return 0, err10 } - i -= n8 - i = encodeVarintLogproto(dAtA, i, uint64(n8)) + i -= n10 + i = encodeVarintLogproto(dAtA, i, uint64(n10)) i-- dAtA[i] = 0x1a } @@ -5761,12 +5929,24 @@ func (m *TailRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err9 != nil { - return 0, err9 + if m.Plan != nil { + { + size := m.Plan.Size() + i -= size + if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLogproto(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i -= n9 - i = encodeVarintLogproto(dAtA, i, uint64(n9)) + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err12 != nil { + return 0, err12 + } + i -= n12 + i = encodeVarintLogproto(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x2a if m.Limit != 0 { @@ -5876,20 +6056,20 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x1a } } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if 
err11 != nil { - return 0, err11 + n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err14 != nil { + return 0, err14 } - i -= n11 - i = encodeVarintLogproto(dAtA, i, uint64(n11)) + i -= n14 + i = encodeVarintLogproto(dAtA, i, uint64(n14)) i-- dAtA[i] = 0x12 - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err12 != nil { - return 0, err12 + n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err15 != nil { + return 0, err15 } - i -= n12 - i = encodeVarintLogproto(dAtA, i, uint64(n12)) + i -= n15 + i = encodeVarintLogproto(dAtA, i, uint64(n15)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -6001,20 +6181,20 @@ func (m *DroppedStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):]) - if err13 != nil { - return 0, err13 + n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):]) + if err16 != nil { + return 0, err16 } - i -= n13 - i = encodeVarintLogproto(dAtA, i, uint64(n13)) + i -= n16 + i = encodeVarintLogproto(dAtA, i, uint64(n16)) i-- dAtA[i] = 0x12 - n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):]) - if err14 != nil { - return 0, err14 + n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):]) + if err17 != nil { + return 0, err17 } - i -= n14 - i = encodeVarintLogproto(dAtA, i, uint64(n14)) + i -= n17 + i = encodeVarintLogproto(dAtA, i, uint64(n17)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -6195,20 +6375,20 @@ func (m *GetChunkIDsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err15 != nil { - return 0, err15 + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err18 != nil { + return 0, err18 } - i -= n15 - i = encodeVarintLogproto(dAtA, i, uint64(n15)) + i -= n18 + i = encodeVarintLogproto(dAtA, i, uint64(n18)) i-- dAtA[i] = 0x1a - n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err16 != nil { - return 0, err16 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err19 != nil { + return 0, err19 } - i -= n16 - i = encodeVarintLogproto(dAtA, i, uint64(n16)) + i -= n19 + i = encodeVarintLogproto(dAtA, i, uint64(n19)) i-- dAtA[i] = 0x12 if len(m.Matchers) > 0 { @@ -7131,6 +7311,10 @@ func (m *QueryRequest) Size() (n int) { n += 1 + l + sovLogproto(uint64(l)) } } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -7160,6 +7344,23 @@ func (m *SampleQueryRequest) Size() (n int) { n += 1 + l + sovLogproto(uint64(l)) } } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *Plan) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Raw) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -7332,6 +7533,10 @@ func (m *TailRequest) Size() (n int) { } l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) n += 1 + l + sovLogproto(uint64(l)) + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -7954,6 +8159,7 @@ func (this *QueryRequest) String() string { `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`, `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, `Deletes:` + repeatedStringForDeletes + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `}`, }, "") return s @@ -7973,6 +8179,17 @@ func (this *SampleQueryRequest) String() string { `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, `Deletes:` + repeatedStringForDeletes + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, + `}`, + }, "") + return s +} +func (this *Plan) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Plan{`, + `Raw:` + fmt.Sprintf("%v", this.Raw) + `,`, `}`, }, "") return s @@ -8084,6 +8301,7 @@ func (this *TailRequest) String() string { `DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `}`, }, "") return s @@ -9022,6 +9240,42 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -9239,6 +9493,129 @@ func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Plan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Plan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) @@ -10375,6 +10752,42 @@ func (m *TailRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index 35824a78e202e..7cd6f32fe58d5 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -51,7 +51,7 @@ message StreamRate { } message QueryRequest { - string selector = 1; + string selector = 1 [deprecated = true]; uint32 limit = 2; google.protobuf.Timestamp start = 3 [ (gogoproto.stdtime) = true, @@ -65,10 +65,11 @@ message QueryRequest { reserved 6; repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"]; repeated Delete deletes = 8; + Plan plan = 9 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; } message SampleQueryRequest { - string selector = 1; + string selector = 1 [deprecated = true]; // mark as reserved once we've fully migrated to plan. 
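+  // The parsed query now travels in the `plan` field declared below; the selector string is kept only for wire compatibility until the migration to plan is complete.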
google.protobuf.Timestamp start = 2 [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false @@ -79,6 +80,11 @@ message SampleQueryRequest { ]; repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"]; repeated Delete deletes = 5; + Plan plan = 6 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; +} + +message Plan { + bytes raw = 1; } message Delete { @@ -148,7 +154,7 @@ message Series { } message TailRequest { - string query = 1; + string query = 1 [deprecated = true]; reserved 2; uint32 delayFor = 3; uint32 limit = 4; @@ -156,6 +162,7 @@ message TailRequest { (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; + Plan plan = 6 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"]; } message TailResponse { diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go index 2cd706c812f6a..27cb3e849fa2c 100644 --- a/pkg/logql/downstream.go +++ b/pkg/logql/downstream.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -155,6 +156,50 @@ func (c ConcatLogSelectorExpr) string(maxDepth int) string { return fmt.Sprintf("%s ++ %s", c.DownstreamLogSelectorExpr.String(), c.next.string(maxDepth-1)) } +// QuantileSketchEvalExpr evaluates a quantile sketch to the actual quantile. +type QuantileSketchEvalExpr struct { + syntax.SampleExpr + quantileMergeExpr *QuantileSketchMergeExpr + quantile *float64 +} + +func (e QuantileSketchEvalExpr) String() string { + return fmt.Sprintf("quantileSketchEval<%s>", e.quantileMergeExpr.String()) +} + +func (e *QuantileSketchEvalExpr) Walk(f syntax.WalkFn) { + f(e) + e.quantileMergeExpr.Walk(f) +} + +type QuantileSketchMergeExpr struct { + syntax.SampleExpr + downstreams []DownstreamSampleExpr +} + +func (e QuantileSketchMergeExpr) String() string { + var sb strings.Builder + for i, d := range e.downstreams { + if i >= defaultMaxDepth { + break + } + + if i > 0 { + sb.WriteString(" ++ ") + } + + sb.WriteString(d.String()) + } + return fmt.Sprintf("quantileSketchMerge<%s>", sb.String()) +} + +func (e *QuantileSketchMergeExpr) Walk(f syntax.WalkFn) { + f(e) + for _, d := range e.downstreams { + d.Walk(f) + } +} + type Shards []astmapper.ShardAnnotation func (xs Shards) Encode() (encoded []string) { @@ -308,6 +353,47 @@ func (ev *DownstreamEvaluator) NewStepEvaluator( } return NewConcatStepEvaluator(xs), nil + case *QuantileSketchEvalExpr: + var queries []DownstreamQuery + if e.quantileMergeExpr != nil { + for _, d := range e.quantileMergeExpr.downstreams { + qry := DownstreamQuery{ + Params: ParamsWithExpressionOverride{ + Params: params, + ExpressionOverride: d.SampleExpr, + }, + } + if shard := d.shard; shard != nil { + qry.Params = ParamsWithShardsOverride{ + Params: qry.Params, + ShardsOverride: Shards{*shard}.Encode(), + } + } + queries = append(queries, qry) + } + } + + results, err := ev.Downstream(ctx, queries) + if err != nil { + return nil, fmt.Errorf("error running quantile sketch downstream query: %w", err) + } + + xs := make([]StepEvaluator, 0, len(queries)) + for _, res := range results { + if res.Data.Type() != QuantileSketchMatrixType { + return nil, fmt.Errorf("unexpected matrix data type: got (%s), want (%s)", res.Data.Type(), QuantileSketchMatrixType) + } + data, ok := res.Data.(ProbabilisticQuantileMatrix) + if !ok { + return nil, fmt.Errorf("unexpected matrix type: got (%T), want (ProbabilisticQuantileMatrix)", res.Data) + } + stepper := NewQuantileSketchMatrixStepEvaluator(data, params) + xs = 
append(xs, stepper) + } + + inner := NewQuantileSketchMergeStepEvaluator(xs) + + return NewQuantileSketchVectorStepEvaluator(inner, *e.quantile), nil default: return ev.defaultEvaluator.NewStepEvaluator(ctx, nextEvFactory, e, params) diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go index 0f4d1cd09984d..218957f862bb1 100644 --- a/pkg/logql/downstream_test.go +++ b/pkg/logql/downstream_test.go @@ -54,6 +54,7 @@ func TestMappingEquivalence(t *testing.T) { {`sum(rate({a=~".+"} |= "foo" != "foo"[1s]) or vector(1))`, false}, {`avg_over_time({a=~".+"} | logfmt | unwrap value [1s])`, false}, {`avg_over_time({a=~".+"} | logfmt | unwrap value [1s]) by (a)`, true}, + {`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s])`, true}, // topk prefers already-seen values in tiebreakers. Since the test data generates // the same log lines for each series & the resulting promql.Vectors aren't deterministically // sorted by labels, we don't expect this to pass. @@ -85,17 +86,17 @@ func TestMappingEquivalence(t *testing.T) { qry := regular.Query(params) ctx := user.InjectOrgID(context.Background(), "fake") - mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics) + mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{}) _, _, mapped, err := mapper.Parse(params.GetExpression()) - require.Nil(t, err) + require.NoError(t, err) shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{Params: params, ExpressionOverride: mapped}) res, err := qry.Exec(ctx) - require.Nil(t, err) + require.NoError(t, err) shardedRes, err := shardedQry.Exec(ctx) - require.Nil(t, err) + require.NoError(t, err) if tc.approximate { approximatelyEquals(t, res.Data.(promql.Matrix), shardedRes.Data.(promql.Matrix)) @@ -106,6 +107,70 @@ func TestMappingEquivalence(t *testing.T) { } } +func TestMappingEquivalenceSketches(t *testing.T) { + var ( + shards = 3 + nStreams = 10_000 + rounds = 20 + streams = randomStreams(nStreams, rounds+1, shards, []string{"a", "b", "c", "d"}, true) + start = time.Unix(0, 0) + end = time.Unix(0, int64(time.Second*time.Duration(rounds))) + step = time.Second + interval = time.Duration(0) + limit = 100 + ) + + for _, tc := range []struct { + query string + relativeError float64 + }{ + {`quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.03}, + {`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.02}, + } { + q := NewMockQuerier( + shards, + streams, + ) + + opts := EngineOpts{} + regular := NewEngine(opts, q, NoLimits, log.NewNopLogger()) + sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger()) + + t.Run(tc.query, func(t *testing.T) { + params, err := NewLiteralParams( + tc.query, + start, + end, + step, + interval, + logproto.FORWARD, + uint32(limit), + nil, + ) + require.NoError(t, err) + qry := regular.Query(params) + ctx := user.InjectOrgID(context.Background(), "fake") + + mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{ShardQuantileOverTime}) + _, _, mapped, err := mapper.Parse(params.GetExpression()) + require.NoError(t, err) + + shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{ + Params: params, + ExpressionOverride: mapped, + }) + + res, err := qry.Exec(ctx) + require.NoError(t, err) + + shardedRes, err := shardedQry.Exec(ctx) + require.NoError(t, err) + + relativeError(t, res.Data.(promql.Matrix), shardedRes.Data.(promql.Matrix), tc.relativeError) + }) + } +} + func TestShardCounter(t *testing.T) { var (
shards = 3 @@ -151,7 +216,7 @@ func TestShardCounter(t *testing.T) { require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), "fake") - mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics) + mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{ShardQuantileOverTime}) noop, _, mapped, err := mapper.Parse(params.GetExpression()) require.NoError(t, err) @@ -412,13 +477,13 @@ func TestRangeMappingEquivalence(t *testing.T) { // Regular engine qry := regularEngine.Query(params) res, err := qry.Exec(ctx) - require.Nil(t, err) + require.NoError(t, err) // Downstream engine - split by range rangeMapper, err := NewRangeMapper(tc.splitByInterval, nilRangeMetrics, NewMapperStats()) - require.Nil(t, err) + require.NoError(t, err) noop, rangeExpr, err := rangeMapper.Parse(syntax.MustParseExpr(tc.query)) - require.Nil(t, err) + require.NoError(t, err) require.False(t, noop, "downstream engine cannot execute noop") @@ -451,3 +516,22 @@ func approximatelyEquals(t *testing.T, as, bs promql.Matrix) { require.Equalf(t, a, b, "metric %s differs from %s at %d", a.Metric, b.Metric, i) } } + +func relativeError(t *testing.T, expected, actual promql.Matrix, alpha float64) { + require.Len(t, actual, len(expected)) + + for i := 0; i < len(expected); i++ { + expectedSeries := expected[i] + actualSeries := actual[i] + require.Equal(t, expectedSeries.Metric, actualSeries.Metric) + require.Lenf(t, actualSeries.Floats, len(expectedSeries.Floats), "for series %s", expectedSeries.Metric) + + e := make([]float64, len(expectedSeries.Floats)) + a := make([]float64, len(expectedSeries.Floats)) + for j := 0; j < len(expectedSeries.Floats); j++ { + e[j] = expectedSeries.Floats[j].F + a[j] = actualSeries.Floats[j].F + } + require.InEpsilonSlice(t, e, a, alpha) + } +} diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index e04cf1dcffa71..89490d479e34e 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -2,6 +2,7 @@ package logql import ( "context" + "errors" "flag" "fmt" "math" @@ -83,7 +84,14 @@ func (s SelectLogParams) String() string { // LogSelector returns the LogSelectorExpr from the SelectParams. // The `LogSelectorExpr` can then return all matchers and filters to use for that request. func (s SelectLogParams) LogSelector() (syntax.LogSelectorExpr, error) { - return syntax.ParseLogSelector(s.Selector, true) + if s.QueryRequest.Plan == nil { + return nil, errors.New("query plan is empty") + } + expr, ok := s.QueryRequest.Plan.AST.(syntax.LogSelectorExpr) + if !ok { + return nil, errors.New("only log selector is supported") + } + return expr, nil } type SelectSampleParams struct { @@ -93,13 +101,20 @@ type SelectSampleParams struct { // Expr returns the SampleExpr from the SelectSampleParams. // The `SampleExpr` can then return all matchers and filters to use for that request. func (s SelectSampleParams) Expr() (syntax.SampleExpr, error) { - return syntax.ParseSampleExpr(s.Selector) + if s.SampleQueryRequest.Plan == nil { + return nil, errors.New("query plan is empty") + } + expr, ok := s.SampleQueryRequest.Plan.AST.(syntax.SampleExpr) + if !ok { + return nil, errors.New("only sample expression supported") + } + return expr, nil } // LogSelector returns the LogSelectorExpr from the SelectParams. // The `LogSelectorExpr` can then return all matchers and filters to use for that request.
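// With the plan attached to the request, the selector is derived from the pre-parsed SampleExpr returned by Expr() rather than by re-parsing the raw query string.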
func (s SelectSampleParams) LogSelector() (syntax.LogSelectorExpr, error) { - expr, err := syntax.ParseSampleExpr(s.Selector) + expr, err := s.Expr() if err != nil { return nil, err } @@ -327,21 +342,37 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_ if err != nil { return nil, err } + stepEvaluator, err := q.evaluator.NewStepEvaluator(ctx, q.evaluator, expr, q.params) if err != nil { return nil, err } defer util.LogErrorWithContext(ctx, "closing SampleExpr", stepEvaluator.Close) - maxSeriesCapture := func(id string) int { return q.limits.MaxQuerySeries(ctx, id) } - maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture) - - seriesIndex := map[uint64]*promql.Series{} - next, ts, r := stepEvaluator.Next() if stepEvaluator.Error() != nil { return nil, stepEvaluator.Error() } + + if next && r != nil { + switch vec := r.(type) { + case SampleVector: + maxSeriesCapture := func(id string) int { return q.limits.MaxQuerySeries(ctx, id) } + maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture) + return q.JoinSampleVector(next, ts, vec, stepEvaluator, maxSeries) + case ProbabilisticQuantileVector: + return JoinQuantileSketchVector(next, vec, stepEvaluator) + default: + return nil, fmt.Errorf("unsupported result type: %T", r) + } + } + return nil, nil +} + +func (q *query) JoinSampleVector(next bool, ts int64, r StepResult, stepEvaluator StepEvaluator, maxSeries int) (promql_parser.Value, error) { + + seriesIndex := map[uint64]*promql.Series{} + vec := promql.Vector{} if next { vec = r.SampleVector() diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index e0b6ab3dff2ae..2dce4ba57ed41 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/grafana/loki/pkg/logqlmodel/metadata" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" "github.com/go-kit/log" @@ -64,8 +65,15 @@ func TestEngine_LogsRateUnwrap(t *testing.T) { {newSeries(testSize, offset(46, constantValue(1)), `{app="foo"}`)}, }, []SelectSampleParams{ - {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: `rate({app="foo"} | unwrap foo[30s])`}}, - }, + {&logproto.SampleQueryRequest{ + Start: time.Unix(30, 0), + End: time.Unix(60, 0), + Selector: `rate({app="foo"} | unwrap foo[30s])`, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`rate({app="foo"} | unwrap foo[30s])`), + }, + }, + }}, // there are 15 samples (from 47 to 61) matched from the generated series // SUM(n=47, 61, 1) = 15 // 15 / 30 = 0.5 @@ -82,7 +90,14 @@ func TestEngine_LogsRateUnwrap(t *testing.T) { {newSeries(testSize, offset(46, incValue(1)), `{app="foo"}`)}, }, []SelectSampleParams{ - {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: `rate({app="foo"} | unwrap foo[30s])`}}, + {&logproto.SampleQueryRequest{ + Start: time.Unix(30, 0), + End: time.Unix(60, 0), + Selector: `rate({app="foo"} | unwrap foo[30s])`, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`rate({app="foo"} | unwrap foo[30s])`), + }, + }}, }, // there are 15 samples (from 47 to 61) matched from the generated series // SUM(n=47, 61, n) = (47+48+...+61) = 810 @@ -100,7 +115,14 @@ func TestEngine_LogsRateUnwrap(t *testing.T) { {newSeries(testSize, offset(46, constantValue(1)), `{app="foo"}`)}, }, []SelectSampleParams{ - {&logproto.SampleQueryRequest{Start: time.Unix(30, 0), End: time.Unix(60, 0), Selector: 
`rate_counter({app="foo"} | unwrap foo[30s])`}}, + {&logproto.SampleQueryRequest{ + Start: time.Unix(30, 0), + End: time.Unix(60, 0), + Selector: `rate_counter({app="foo"} | unwrap foo[30s])`, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`rate_counter({app="foo"} | unwrap foo[30s])`), + }, + }}, }, // there are 15 samples (from 47 to 61) matched from the generated series // (1 - 1) / 30 = 0 @@ -2669,6 +2691,9 @@ func newQuerierRecorder(t *testing.T, data interface{}, params interface{}) *que if streamsIn, ok := data.([][]logproto.Stream); ok { if paramsIn, ok2 := params.([]SelectLogParams); ok2 { for i, p := range paramsIn { + p.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(p.Selector), + } streams[paramsID(p)] = streamsIn[i] } } @@ -2678,6 +2703,9 @@ func newQuerierRecorder(t *testing.T, data interface{}, params interface{}) *que if seriesIn, ok := data.([][]logproto.Series); ok { if paramsIn, ok2 := params.([]SelectSampleParams); ok2 { for i, p := range paramsIn { + p.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(p.Selector), + } series[paramsID(p)] = seriesIn[i] } } diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index fdb9190956a5b..2d6837ef6a78a 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/util" ) @@ -210,6 +211,9 @@ func (ev *DefaultEvaluator) NewIterator(ctx context.Context, expr syntax.LogSele Direction: q.Direction(), Selector: expr.String(), Shards: q.Shards(), + Plan: &plan.QueryPlan{ + AST: expr, + }, }, } @@ -238,6 +242,9 @@ func (ev *DefaultEvaluator) NewStepEvaluator( End: q.End().Add(-rangExpr.Left.Offset), Selector: e.String(), // intentionally send the vector for reducing labels. 
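// The plan field below attaches the pre-parsed AST so downstream queriers do not need to re-parse the selector string.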
Shards: q.Shards(), + Plan: &plan.QueryPlan{ + AST: expr, + }, }, }) if err != nil { @@ -254,6 +261,9 @@ func (ev *DefaultEvaluator) NewStepEvaluator( End: q.End().Add(-e.Left.Offset), Selector: expr.String(), Shards: q.Shards(), + Plan: &plan.QueryPlan{ + AST: expr, + }, }, }) if err != nil { @@ -515,17 +525,18 @@ func newRangeAggEvaluator( q Params, o time.Duration, ) (StepEvaluator, error) { + switch expr.Operation { + case syntax.OpRangeTypeAbsent: + iter, err := newRangeVectorIterator( + it, expr, + expr.Left.Interval.Nanoseconds(), + q.Step().Nanoseconds(), + q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(), + ) + if err != nil { + return nil, err + } - iter, err := newRangeVectorIterator( - it, expr, - expr.Left.Interval.Nanoseconds(), - q.Step().Nanoseconds(), - q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(), - ) - if err != nil { - return nil, err - } - if expr.Operation == syntax.OpRangeTypeAbsent { absentLabels, err := absentLabels(expr) if err != nil { return nil, err @@ -534,10 +545,32 @@ func newRangeAggEvaluator( iter: iter, lbs: absentLabels, }, nil + case syntax.OpRangeTypeQuantileSketch: + iter := newQuantileSketchIterator( + it, + expr.Left.Interval.Nanoseconds(), + q.Step().Nanoseconds(), + q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(), + ) + + return &QuantileSketchStepEvaluator{ + iter: iter, + }, nil + default: + iter, err := newRangeVectorIterator( + it, expr, + expr.Left.Interval.Nanoseconds(), + q.Step().Nanoseconds(), + q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(), + ) + if err != nil { + return nil, err + } + + return &RangeVectorEvaluator{ + iter: iter, + }, nil } - return &RangeVectorEvaluator{ - iter: iter, - }, nil } type RangeVectorEvaluator struct { diff --git a/pkg/logql/explain.go b/pkg/logql/explain.go index ef161b38c8f97..4890d150f0a61 100644 --- a/pkg/logql/explain.go +++ b/pkg/logql/explain.go @@ -1,5 +1,9 @@ package logql +// MaxChildrenDisplay defines the maximum number of children that should be +// shown by explain. +const MaxChildrenDisplay = 3 + func (e *LiteralStepEvaluator) Explain(parent Node) { b := parent.Child("Literal") e.nextEv.Explain(b) @@ -25,7 +29,7 @@ func (e *VectorStepEvaluator) Explain(parent Node) { func (e *ConcatStepEvaluator) Explain(parent Node) { b := parent.Child("Concat") - if len(e.evaluators) < 3 { + if len(e.evaluators) < MaxChildrenDisplay { for _, child := range e.evaluators { child.Explain(b) } diff --git a/pkg/logql/explain_test.go b/pkg/logql/explain_test.go index 5ae2f840e1c88..307aa10cfa98d 100644 --- a/pkg/logql/explain_test.go +++ b/pkg/logql/explain_test.go @@ -28,7 +28,7 @@ func TestExplain(t *testing.T) { defaultEv := NewDefaultEvaluator(querier, 30*time.Second) downEv := &DownstreamEvaluator{Downstreamer: MockDownstreamer{regular}, defaultEvaluator: defaultEv} - mapper := NewShardMapper(ConstantShards(4), nilShardMetrics) + mapper := NewShardMapper(ConstantShards(4), nilShardMetrics, []string{ShardQuantileOverTime}) _, _, expr, err := mapper.Parse(syntax.MustParseExpr(query)) require.NoError(t, err) diff --git a/pkg/logql/optimize.go b/pkg/logql/optimize.go index 1f00153e18b87..2f9c80a64f918 100644 --- a/pkg/logql/optimize.go +++ b/pkg/logql/optimize.go @@ -8,7 +8,7 @@ func optimizeSampleExpr(expr syntax.SampleExpr) (syntax.SampleExpr, error) { // we skip sharding AST for now, it's not easy to clone them since they are not part of the language. 
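// Wrapper nodes such as ConcatSampleExpr or QuantileSketchEvalExpr have no textual LogQL form, so a String/re-parse round trip cannot reconstruct them.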
expr.Walk(func(e syntax.Expr) { switch e.(type) { - case *ConcatSampleExpr, *DownstreamSampleExpr: + case *ConcatSampleExpr, *DownstreamSampleExpr, *QuantileSketchEvalExpr, *QuantileSketchMergeExpr: skip = true return } @@ -16,9 +16,7 @@ func optimizeSampleExpr(expr syntax.SampleExpr) (syntax.SampleExpr, error) { if skip { return expr, nil } - // clone the expr. - q := expr.String() - expr, err := syntax.ParseSampleExpr(q) + expr, err := syntax.Clone[syntax.SampleExpr](expr) if err != nil { return nil, err } diff --git a/pkg/logql/quantile_over_time_sketch.go b/pkg/logql/quantile_over_time_sketch.go new file mode 100644 index 0000000000000..94aea83dcd90e --- /dev/null +++ b/pkg/logql/quantile_over_time_sketch.go @@ -0,0 +1,413 @@ +package logql + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + promql_parser "github.com/prometheus/prometheus/promql/parser" + + "github.com/grafana/loki/pkg/iter" + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/sketch" + "github.com/grafana/loki/pkg/logqlmodel" +) + +const ( + QuantileSketchMatrixType = "QuantileSketchMatrix" +) + +type ProbabilisticQuantileVector []ProbabilisticQuantileSample +type ProbabilisticQuantileMatrix []ProbabilisticQuantileVector + +func (q ProbabilisticQuantileVector) Merge(right ProbabilisticQuantileVector) (ProbabilisticQuantileVector, error) { + // labels hash to vector index map + groups := make(map[uint64]int) + for i, sample := range q { + groups[sample.Metric.Hash()] = i + } + + for _, sample := range right { + i, ok := groups[sample.Metric.Hash()] + if !ok { + q = append(q, sample) + continue + } + + _, err := q[i].F.Merge(sample.F) + if err != nil { + return q, err + } + } + + return q, nil +} + +func (ProbabilisticQuantileVector) SampleVector() promql.Vector { + return promql.Vector{} +} + +func (q ProbabilisticQuantileVector) QuantileSketchVec() ProbabilisticQuantileVector { + return q +} + +func (q ProbabilisticQuantileVector) ToProto() *logproto.QuantileSketchVector { + samples := make([]*logproto.QuantileSketchSample, len(q)) + for i, sample := range q { + samples[i] = sample.ToProto() + } + return &logproto.QuantileSketchVector{Samples: samples} +} + +func ProbabilisticQuantileVectorFromProto(proto *logproto.QuantileSketchVector) (ProbabilisticQuantileVector, error) { + out := make([]ProbabilisticQuantileSample, len(proto.Samples)) + var s ProbabilisticQuantileSample + var err error + for i, sample := range proto.Samples { + s, err = probabilisticQuantileSampleFromProto(sample) + if err != nil { + return ProbabilisticQuantileVector{}, err + } + out[i] = s + } + return out, nil +} + +func (ProbabilisticQuantileMatrix) String() string { + return "QuantileSketchMatrix()" +} + +func (ProbabilisticQuantileMatrix) Type() promql_parser.ValueType { return QuantileSketchMatrixType } + +func (m ProbabilisticQuantileMatrix) ToProto() *logproto.QuantileSketchMatrix { + values := make([]*logproto.QuantileSketchVector, len(m)) + for i, vec := range m { + values[i] = vec.ToProto() + } + return &logproto.QuantileSketchMatrix{Values: values} +} + +func ProbabilisticQuantileMatrixFromProto(proto *logproto.QuantileSketchMatrix) (ProbabilisticQuantileMatrix, error) { + out := make([]ProbabilisticQuantileVector, len(proto.Values)) + var s ProbabilisticQuantileVector + var err error + for i, v := range proto.Values { + s, err = ProbabilisticQuantileVectorFromProto(v) + if err != nil { + return ProbabilisticQuantileMatrix{}, err + } + 
out[i] = s + } + return out, nil +} + +type QuantileSketchStepEvaluator struct { + iter RangeVectorIterator + + err error +} + +func (e *QuantileSketchStepEvaluator) Next() (bool, int64, StepResult) { + next := e.iter.Next() + if !next { + return false, 0, ProbabilisticQuantileVector{} + } + ts, r := e.iter.At() + vec := r.QuantileSketchVec() + for _, s := range vec { + // Errors are not allowed in metrics unless they've been specifically requested. + if s.Metric.Has(logqlmodel.ErrorLabel) && s.Metric.Get(logqlmodel.PreserveErrorLabel) != "true" { + e.err = logqlmodel.NewPipelineErr(s.Metric) + return false, 0, ProbabilisticQuantileVector{} + } + } + return true, ts, vec +} + +func (e *QuantileSketchStepEvaluator) Close() error { return e.iter.Close() } + +func (e *QuantileSketchStepEvaluator) Error() error { + if e.err != nil { + return e.err + } + return e.iter.Error() +} + +func (e *QuantileSketchStepEvaluator) Explain(parent Node) { + parent.Child("QuantileSketch") +} + +func newQuantileSketchIterator( + it iter.PeekingSampleIterator, + selRange, step, start, end, offset int64) RangeVectorIterator { + inner := &batchRangeVectorIterator{ + iter: it, + step: step, + end: end, + selRange: selRange, + metrics: map[string]labels.Labels{}, + window: map[string]*promql.Series{}, + agg: nil, + current: start - step, // first loop iteration will set it to start + offset: offset, + } + return &quantileSketchBatchRangeVectorIterator{ + batchRangeVectorIterator: inner, + } +} + +// ProbabilisticQuantileSample mirrors promql.Sample, carrying a quantile sketch instead of a float value. + +type ProbabilisticQuantileSample struct { + T int64 + F sketch.QuantileSketch + + Metric labels.Labels +} + +func (q ProbabilisticQuantileSample) ToProto() *logproto.QuantileSketchSample { + metric := make([]*logproto.LabelPair, len(q.Metric)) + for i, m := range q.Metric { + metric[i] = &logproto.LabelPair{Name: m.Name, Value: m.Value} + } + + sketch := q.F.ToProto() + + return &logproto.QuantileSketchSample{ + F: sketch, + TimestampMs: q.T, + Metric: metric, + } +} + +func probabilisticQuantileSampleFromProto(proto *logproto.QuantileSketchSample) (ProbabilisticQuantileSample, error) { + s, err := sketch.QuantileSketchFromProto(proto.F) + if err != nil { + return ProbabilisticQuantileSample{}, err + } + out := ProbabilisticQuantileSample{ + T: proto.TimestampMs, + F: s, + Metric: make(labels.Labels, len(proto.Metric)), + } + + for i, p := range proto.Metric { + out.Metric[i] = labels.Label{Name: p.Name, Value: p.Value} + } + + return out, nil +} + +type quantileSketchBatchRangeVectorIterator struct { + *batchRangeVectorIterator + at []ProbabilisticQuantileSample +} + +func (r *quantileSketchBatchRangeVectorIterator) At() (int64, StepResult) { + if r.at == nil { + r.at = make([]ProbabilisticQuantileSample, 0, len(r.window)) + } + r.at = r.at[:0] + // convert ts from nanoseconds to milliseconds as the iterator works with nanoseconds + ts := r.current/1e+6 + r.offset/1e+6 + for _, series := range r.window { + r.at = append(r.at, ProbabilisticQuantileSample{ + F: r.agg(series.Floats), + T: ts, + Metric: series.Metric, + }) + } + return ts, ProbabilisticQuantileVector(r.at) +} + +func (r *quantileSketchBatchRangeVectorIterator) agg(samples []promql.FPoint) sketch.QuantileSketch { + s := sketch.NewDDSketch() + for _, v := range samples { + // The sketch from the underlying sketch package we are using + // cannot return an error when calling Add. + s.Add(v.F) //nolint:errcheck + } + return s +} + +// JoinQuantileSketchVector joins the results from stepEvaluator into a ProbabilisticQuantileMatrix.
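+// The first step's result is passed in via next and r so the caller's initial Next() call is consumed here rather than evaluated twice; iteration stops at the first evaluator error.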
+func JoinQuantileSketchVector(next bool, r StepResult, stepEvaluator StepEvaluator) (promql_parser.Value, error) { + vec := r.QuantileSketchVec() + if stepEvaluator.Error() != nil { + return nil, stepEvaluator.Error() + } + + result := make([]ProbabilisticQuantileVector, 0) + + for next { + result = append(result, vec) + + next, _, r = stepEvaluator.Next() + vec = r.QuantileSketchVec() + if stepEvaluator.Error() != nil { + return nil, stepEvaluator.Error() + } + } + + return ProbabilisticQuantileMatrix(result), stepEvaluator.Error() +} + +// QuantileSketchMatrixStepEvaluator steps through a matrix of quantile sketch +// vectors, ie t-digest or DDSketch structures per time step. +type QuantileSketchMatrixStepEvaluator struct { + start, end, ts time.Time + step time.Duration + m ProbabilisticQuantileMatrix +} + +func NewQuantileSketchMatrixStepEvaluator(m ProbabilisticQuantileMatrix, params Params) *QuantileSketchMatrixStepEvaluator { + var ( + start = params.Start() + end = params.End() + step = params.Step() + ) + return &QuantileSketchMatrixStepEvaluator{ + start: start, + end: end, + ts: start.Add(-step), // will be corrected on first Next() call + step: step, + m: m, + } +} + +func (m *QuantileSketchMatrixStepEvaluator) Next() (bool, int64, StepResult) { + m.ts = m.ts.Add(m.step) + if m.ts.After(m.end) { + return false, 0, nil + } + + ts := m.ts.UnixNano() / int64(time.Millisecond) + + if len(m.m) == 0 { + return false, 0, nil + } + + vec := m.m[0] + + // Reset for next step + m.m = m.m[1:] + + return true, ts, vec +} + +func (*QuantileSketchMatrixStepEvaluator) Close() error { return nil } + +func (*QuantileSketchMatrixStepEvaluator) Error() error { return nil } + +func (*QuantileSketchMatrixStepEvaluator) Explain(parent Node) { + parent.Child("QuantileSketchMatrix") +} + +// QuantileSketchMergeStepEvaluator merges multiple quantile sketches into one for each +// step. +type QuantileSketchMergeStepEvaluator struct { + evaluators []StepEvaluator + err error +} + +func NewQuantileSketchMergeStepEvaluator(evaluators []StepEvaluator) *QuantileSketchMergeStepEvaluator { + return &QuantileSketchMergeStepEvaluator{ + evaluators: evaluators, + err: nil, + } +} + +func (e *QuantileSketchMergeStepEvaluator) Next() (bool, int64, StepResult) { + ok, ts, r := e.evaluators[0].Next() + var cur ProbabilisticQuantileVector + if ok { + cur = r.QuantileSketchVec() + } + + if len(e.evaluators) == 1 { + return ok, ts, cur + } + + for _, eval := range e.evaluators[1:] { + ok, nextTs, vec := eval.Next() + if ok { + if cur == nil { + cur = vec.QuantileSketchVec() + } else { + if ts != nextTs { + e.err = fmt.Errorf("timestamps of sketches differ: %d!=%d", ts, nextTs) + return false, 0, nil + } + + _, e.err = cur.Merge(vec.QuantileSketchVec()) + if e.err != nil { + return false, 0, nil + } + } + } + } + + return ok, ts, cur +} + +func (*QuantileSketchMergeStepEvaluator) Close() error { return nil } + +func (e *QuantileSketchMergeStepEvaluator) Error() error { return e.err } + +func (e *QuantileSketchMergeStepEvaluator) Explain(parent Node) { + b := parent.Child("QuantileSketchMerge") + if len(e.evaluators) < MaxChildrenDisplay { + for _, child := range e.evaluators { + child.Explain(b) + } + } else { + e.evaluators[0].Explain(b) + b.Child("...") + e.evaluators[len(e.evaluators)-1].Explain(b) + } +} + +// QuantileSketchVectorStepEvaluator evaluates a quantile sketch into a +// promql.Vector. 
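+// Each step collapses every sketch in the inner evaluator's vector to a
+// single float via Quantile(q), yielding a plain SampleVector for the rest
+// of the engine. A typical composition over sharded downstream evaluators
+// (names illustrative) is:
+//
+//	merge := NewQuantileSketchMergeStepEvaluator(downstreamEvaluators)
+//	ev := NewQuantileSketchVectorStepEvaluator(merge, 0.99)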
+type QuantileSketchVectorStepEvaluator struct { + inner StepEvaluator + quantile float64 +} + +var _ StepEvaluator = NewQuantileSketchVectorStepEvaluator(nil, 0) + +func NewQuantileSketchVectorStepEvaluator(inner StepEvaluator, quantile float64) *QuantileSketchVectorStepEvaluator { + return &QuantileSketchVectorStepEvaluator{ + inner: inner, + quantile: quantile, + } +} + +func (e *QuantileSketchVectorStepEvaluator) Next() (bool, int64, StepResult) { + ok, ts, r := e.inner.Next() + quantileSketchVec := r.QuantileSketchVec() + + vec := make(promql.Vector, len(quantileSketchVec)) + + for i, quantileSketch := range quantileSketchVec { + f, _ := quantileSketch.F.Quantile(e.quantile) + + vec[i] = promql.Sample{ + T: quantileSketch.T, + F: f, + Metric: quantileSketch.Metric, + } + } + + return ok, ts, SampleVector(vec) +} + +func (*QuantileSketchVectorStepEvaluator) Close() error { return nil } + +func (*QuantileSketchVectorStepEvaluator) Error() error { return nil } + +func (e *QuantileSketchVectorStepEvaluator) Explain(parent Node) { + b := parent.Child("QuantileSketchVector") + e.inner.Explain(b) +} diff --git a/pkg/logql/quantile_over_time_sketch_test.go b/pkg/logql/quantile_over_time_sketch_test.go new file mode 100644 index 0000000000000..9a9ff1b603ebf --- /dev/null +++ b/pkg/logql/quantile_over_time_sketch_test.go @@ -0,0 +1,109 @@ +package logql + +import ( + "errors" + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/sketch" + "github.com/grafana/loki/pkg/logqlmodel" +) + +func TestProbabilisticMQuantileMatrixSerialization(t *testing.T) { + emptySketch := sketch.NewDDSketch() + ddsketchBytes := make([]byte, 0) + emptySketch.Encode(&ddsketchBytes, false) + + matrix := ProbabilisticQuantileMatrix([]ProbabilisticQuantileVector{ + []ProbabilisticQuantileSample{ + {T: 0, F: emptySketch, Metric: []labels.Label{{Name: "foo", Value: "bar"}}}, + }, + }) + + proto := &logproto.QuantileSketchMatrix{ + Values: []*logproto.QuantileSketchVector{ + { + Samples: []*logproto.QuantileSketchSample{ + { + TimestampMs: 0, + F: &logproto.QuantileSketch{Sketch: &logproto.QuantileSketch_Ddsketch{Ddsketch: ddsketchBytes}}, + Metric: []*logproto.LabelPair{{Name: "foo", Value: "bar"}}, + }, + }, + }, + }, + } + + actual := matrix.ToProto() + require.Equal(t, proto, actual) + + _, err := ProbabilisticQuantileMatrixFromProto(actual) + require.NoError(t, err) +} + +func TestQuantileSketchStepEvaluatorError(t *testing.T) { + iter := errorRangeVectorIterator{ + result: ProbabilisticQuantileVector([]ProbabilisticQuantileSample{ + {T: 43, F: nil, Metric: labels.Labels{{Name: logqlmodel.ErrorLabel, Value: "my error"}}}, + }), + } + ev := QuantileSketchStepEvaluator{ + iter: iter, + } + ok, _, _ := ev.Next() + require.False(t, ok) + + err := ev.Error() + require.ErrorContains(t, err, "my error") +} + +func TestJoinQuantileSketchVectorError(t *testing.T) { + result := ProbabilisticQuantileVector{} + ev := errorStepEvaluator{ + err: errors.New("could not evaluate"), + } + _, err := JoinQuantileSketchVector(true, result, ev) + require.ErrorContains(t, err, "could not evaluate") +} + +type errorRangeVectorIterator struct { + err error + result StepResult +} + +func (e errorRangeVectorIterator) Next() bool { + return e.result != nil +} + +func (e errorRangeVectorIterator) At() (int64, StepResult) { + return 0, e.result +} + +func (errorRangeVectorIterator) Close() error { + return nil +} + +func 
(e errorRangeVectorIterator) Error() error { + return e.err +} + +type errorStepEvaluator struct { + err error +} + +func (errorStepEvaluator) Next() (ok bool, ts int64, r StepResult) { + return false, 0, nil +} + +func (errorStepEvaluator) Close() error { + return nil +} + +func (e errorStepEvaluator) Error() error { + return e.err +} + +func (e errorStepEvaluator) Explain(Node) {} diff --git a/pkg/logql/range_vector_test.go b/pkg/logql/range_vector_test.go index 089bcff9e266a..c7176bed2ab90 100644 --- a/pkg/logql/range_vector_test.go +++ b/pkg/logql/range_vector_test.go @@ -3,6 +3,8 @@ package logql import ( "context" "fmt" + "math/rand" + "sort" "testing" "time" @@ -13,7 +15,9 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/sketch" "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/logql/vector" ) var samples = []logproto.Sample{ @@ -442,3 +446,69 @@ func value(value float64, negative bool) float64 { } return value } + +func TestQuantiles(t *testing.T) { + // v controls the distribution of values along the curve, a greater v + // value means there's a large distance between generated values + vs := []float64{1.0, 5.0, 10.0} + // s controls the exponential curve of the distribution + // the higher the s values the faster the drop off from max value to lesser values + // s must be > 1.0 + ss := []float64{1.01, 2.0, 3.0, 4.0} + + // T-Digest is too big for 1_000 samples. However, we did not optimize + // the format for size. + nSamples := []int{5_000, 10_000, 100_000, 1_000_000} + + factories := []struct { + newSketch sketch.QuantileSketchFactory + name string + relativeError float64 + }{ + {newSketch: func() sketch.QuantileSketch { return sketch.NewDDSketch() }, name: "DDSketch", relativeError: 0.02}, + {newSketch: sketch.NewTDigestSketch, name: "T-Digest", relativeError: 0.05}, + } + + for _, tc := range factories { + for _, samplesCount := range nSamples { + for _, s := range ss { + for _, v := range vs { + t.Run(fmt.Sprintf("sketch=%s, s=%.2f, v=%.2f, events=%d", tc.name, s, v, samplesCount), func(t *testing.T) { + sk := tc.newSketch() + + r := rand.New(rand.NewSource(42)) + z := rand.NewZipf(r, s, v, 1_000) + values := make(vector.HeapByMaxValue, 0) + for i := 0; i < samplesCount; i++ { + + value := float64(z.Uint64()) + values = append(values, promql.Sample{F: value}) + err := sk.Add(value) + require.NoError(t, err) + } + sort.Sort(values) + + // Size + var buf []byte + var err error + switch s := sk.(type) { + case *sketch.DDSketchQuantile: + buf, err = proto.Marshal(s.DDSketch.ToProto()) + require.NoError(t, err) + case *sketch.TDigestQuantile: + buf, err = proto.Marshal(s.ToProto()) + require.NoError(t, err) + } + require.Less(t, len(buf), samplesCount*8) + + // Accuracy + expected := Quantile(0.99, values) + actual, err := sk.Quantile(0.99) + require.NoError(t, err) + require.InEpsilonf(t, expected, actual, tc.relativeError, "expected quantile %f, actual quantile %f", expected, actual) + }) + } + } + } + } +} diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go index f1ee7e4ba6985..4bee2616bf036 100644 --- a/pkg/logql/shardmapper.go +++ b/pkg/logql/shardmapper.go @@ -25,15 +25,27 @@ type ConstantShards int func (s ConstantShards) Shards(_ syntax.Expr) (int, uint64, error) { return int(s), 0, nil } func (s ConstantShards) GetStats(_ syntax.Expr) (stats.Stats, error) { return stats.Stats{}, nil } +const ( + ShardQuantileOverTime = "quantile_over_time" +) + type ShardMapper 
struct { - shards ShardResolver - metrics *MapperMetrics + shards ShardResolver + metrics *MapperMetrics + quantileOverTimeSharding bool } -func NewShardMapper(resolver ShardResolver, metrics *MapperMetrics) ShardMapper { +func NewShardMapper(resolver ShardResolver, metrics *MapperMetrics, shardAggregation []string) ShardMapper { + quantileOverTimeSharding := false + for _, a := range shardAggregation { + if a == ShardQuantileOverTime { + quantileOverTimeSharding = true + } + } return ShardMapper{ - shards: resolver, - metrics: metrics, + shards: resolver, + metrics: metrics, + quantileOverTimeSharding: quantileOverTimeSharding, } } @@ -158,11 +170,11 @@ func (m ShardMapper) mapSampleExpr(expr syntax.SampleExpr, r *downstreamRecorder }, }, bytesPerShard, nil } - for i := shards - 1; i >= 0; i-- { + for shard := shards - 1; shard >= 0; shard-- { head = &ConcatSampleExpr{ DownstreamSampleExpr: DownstreamSampleExpr{ shard: &astmapper.ShardAnnotation{ - Shard: i, + Shard: shard, Of: shards, }, SampleExpr: expr, @@ -374,7 +386,7 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, return m.mapSampleExpr(expr, r) } - // avg_overtime() by (foo) -> sum by (foo) (sum_over_time()) / sum by (foo) (count_over_time()) + // avg_over_time() by (foo) -> sum by (foo) (sum_over_time()) / sum by (foo) (count_over_time()) lhs, lhsBytesPerShard, err := m.mapVectorAggregationExpr(&syntax.VectorAggregationExpr{ Left: &syntax.RangeAggregationExpr{ Left: expr.Left, @@ -414,6 +426,43 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, Op: syntax.OpTypeDiv, }, bytesPerShard, nil + case syntax.OpRangeTypeQuantile: + potentialConflict := syntax.ReducesLabels(expr) + if !potentialConflict && (expr.Grouping == nil || expr.Grouping.Noop()) { + return m.mapSampleExpr(expr, r) + } + + shards, bytesPerShard, err := m.shards.Shards(expr) + if err != nil { + return nil, 0, err + } + if shards == 0 || !m.quantileOverTimeSharding { + return m.mapSampleExpr(expr, r) + } + + // quantile_over_time() by (foo) -> + // quantile_sketch_eval(quantile_merge by (foo) + // (__quantile_sketch_over_time__() by (foo))) + + downstreams := make([]DownstreamSampleExpr, 0, shards) + expr.Operation = syntax.OpRangeTypeQuantileSketch + for shard := shards - 1; shard >= 0; shard-- { + downstreams = append(downstreams, DownstreamSampleExpr{ + shard: &astmapper.ShardAnnotation{ + Shard: shard, + Of: shards, + }, + SampleExpr: expr, + }) + } + + return &QuantileSketchEvalExpr{ + quantileMergeExpr: &QuantileSketchMergeExpr{ + downstreams: downstreams, + }, + quantile: expr.Params, + }, bytesPerShard, nil + default: // don't shard if there's not an appropriate optimization exprStats, err := m.shards.GetStats(expr) diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go index 80b2e68751fee..4dc4aac0fb449 100644 --- a/pkg/logql/shardmapper_test.go +++ b/pkg/logql/shardmapper_test.go @@ -51,7 +51,7 @@ func TestShardedStringer(t *testing.T) { } func TestMapSampleExpr(t *testing.T) { - m := NewShardMapper(ConstantShards(2), nilShardMetrics) + m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{ShardQuantileOverTime}) for _, tc := range []struct { in syntax.SampleExpr @@ -113,7 +113,7 @@ func TestMapSampleExpr(t *testing.T) { } func TestMappingStrings(t *testing.T) { - m := NewShardMapper(ConstantShards(2), nilShardMetrics) + m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{ShardQuantileOverTime}) for _, tc := range []struct { in string out string @@ 
-418,7 +418,7 @@ func TestMappingStrings(t *testing.T) { } func TestMapping(t *testing.T) { - m := NewShardMapper(ConstantShards(2), nilShardMetrics) + m := NewShardMapper(ConstantShards(2), nilShardMetrics, []string{ShardQuantileOverTime}) for _, tc := range []struct { in string @@ -1409,7 +1409,7 @@ func TestStringTrimming(t *testing.T) { }, } { t.Run(tc.expr.String(), func(t *testing.T) { - m := NewShardMapper(ConstantShards(tc.shards), nilShardMetrics) + m := NewShardMapper(ConstantShards(tc.shards), nilShardMetrics, []string{ShardQuantileOverTime}) _, _, mappedExpr, err := m.Parse(tc.expr) require.Nil(t, err) require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String())) diff --git a/pkg/logql/sketch/quantile.go b/pkg/logql/sketch/quantile.go index 14b44e69f51c7..3a0526fcfc137 100644 --- a/pkg/logql/sketch/quantile.go +++ b/pkg/logql/sketch/quantile.go @@ -6,107 +6,10 @@ import ( "github.com/DataDog/sketches-go/ddsketch" "github.com/influxdata/tdigest" - "github.com/prometheus/prometheus/model/labels" - promql_parser "github.com/prometheus/prometheus/promql/parser" "github.com/grafana/loki/pkg/logproto" ) -// QuantileSketchVector represents multiple qunatile sketches at the same point in -// time. -type QuantileSketchVector []quantileSketchSample - -// QuantileSketchMatrix contains multiples QuantileSketchVectors across many -// points in time. -type QuantileSketchMatrix []QuantileSketchVector - -// ToProto converts a quantile sketch vector to its protobuf definition. -func (q QuantileSketchVector) ToProto() *logproto.QuantileSketchVector { - samples := make([]*logproto.QuantileSketchSample, len(q)) - for i, sample := range q { - samples[i] = sample.ToProto() - } - return &logproto.QuantileSketchVector{Samples: samples} -} - -func QuantileSketchVectorFromProto(proto *logproto.QuantileSketchVector) (QuantileSketchVector, error) { - out := make([]quantileSketchSample, len(proto.Samples)) - var err error - for i, s := range proto.Samples { - out[i], err = quantileSketchSampleFromProto(s) - if err != nil { - return nil, err - } - } - return out, nil -} - -func (QuantileSketchMatrix) String() string { - return "QuantileSketchMatrix()" -} - -func (QuantileSketchMatrix) Type() promql_parser.ValueType { return "QuantileSketchMatrix" } - -func (m QuantileSketchMatrix) ToProto() *logproto.QuantileSketchMatrix { - values := make([]*logproto.QuantileSketchVector, len(m)) - for i, vec := range m { - values[i] = vec.ToProto() - } - return &logproto.QuantileSketchMatrix{Values: values} -} - -func QuantileSketchMatrixFromProto(proto *logproto.QuantileSketchMatrix) (QuantileSketchMatrix, error) { - out := make([]QuantileSketchVector, len(proto.Values)) - var err error - for i, v := range proto.Values { - out[i], err = QuantileSketchVectorFromProto(v) - if err != nil { - return nil, err - } - } - return out, nil -} - -type quantileSketchSample struct { - T int64 - F QuantileSketch - - Metric labels.Labels -} - -func (q quantileSketchSample) ToProto() *logproto.QuantileSketchSample { - metric := make([]*logproto.LabelPair, len(q.Metric)) - for i, m := range q.Metric { - metric[i] = &logproto.LabelPair{Name: m.Name, Value: m.Value} - } - - sketch := q.F.ToProto() - - return &logproto.QuantileSketchSample{ - F: sketch, - TimestampMs: q.T, - Metric: metric, - } -} - -func quantileSketchSampleFromProto(proto *logproto.QuantileSketchSample) (quantileSketchSample, error) { - sketch, err := QuantileSketchFromProto(proto.F) - if err != nil { - return quantileSketchSample{}, err - } - out 
:= quantileSketchSample{ - T: proto.TimestampMs, - F: sketch, - Metric: make(labels.Labels, len(proto.Metric)), - } - - for i, p := range proto.Metric { - out.Metric[i] = labels.Label{Name: p.Name, Value: p.Value} - } - - return out, nil -} - // QuantileSketch estimates quantiles over time. type QuantileSketch interface { Add(float64) error diff --git a/pkg/logql/sketch/quantile_test.go b/pkg/logql/sketch/quantile_test.go deleted file mode 100644 index 3b2f34c0e87c8..0000000000000 --- a/pkg/logql/sketch/quantile_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package sketch - -import ( - "fmt" - "math/rand" - "sort" - "testing" - - "github.com/gogo/protobuf/proto" - "github.com/prometheus/prometheus/promql" - "github.com/stretchr/testify/require" - - "github.com/grafana/loki/pkg/logql" - "github.com/grafana/loki/pkg/logql/vector" -) - -func TestQuantiles(t *testing.T) { - // v controls the distribution of values along the curve, a greater v - // value means there's a large distance between generated values - vs := []float64{1.0, 5.0, 10.0} - // s controls the exponential curve of the distribution - // the higher the s values the faster the drop off from max value to lesser values - // s must be > 1.0 - ss := []float64{1.01, 2.0, 3.0, 4.0} - - // T-Digest is too big for 1_000 samples. However, we did not optimize - // the format for size. - nSamples := []int{5_000, 10_000, 100_000, 1_000_000} - - factories := []struct { - newSketch QuantileSketchFactory - name string - relativeError float64 - }{ - {newSketch: func() QuantileSketch { return NewDDSketch() }, name: "DDSketch", relativeError: 0.02}, - {newSketch: NewTDigestSketch, name: "T-Digest", relativeError: 0.05}, - } - - for _, tc := range factories { - for _, samplesCount := range nSamples { - for _, s := range ss { - for _, v := range vs { - t.Run(fmt.Sprintf("sketch=%s, s=%.2f, v=%.2f, events=%d", tc.name, s, v, samplesCount), func(t *testing.T) { - sketch := tc.newSketch() - - r := rand.New(rand.NewSource(42)) - z := rand.NewZipf(r, s, v, 1_000) - values := make(vector.HeapByMaxValue, 0) - for i := 0; i < samplesCount; i++ { - - value := float64(z.Uint64()) - values = append(values, promql.Sample{F: value}) - err := sketch.Add(value) - require.NoError(t, err) - } - sort.Sort(values) - - // Size - var buf []byte - var err error - switch s := sketch.(type) { - case *DDSketchQuantile: - buf, err = proto.Marshal(s.DDSketch.ToProto()) - require.NoError(t, err) - case *TDigestQuantile: - buf, err = proto.Marshal(s.ToProto()) - require.NoError(t, err) - } - require.Less(t, len(buf), samplesCount*8) - - // Accuracy - expected := logql.Quantile(0.99, values) - actual, err := sketch.Quantile(0.99) - require.NoError(t, err) - require.InEpsilonf(t, expected, actual, tc.relativeError, "expected quantile %f, actual quantile %f", expected, actual) - }) - } - } - } - } -} diff --git a/pkg/logql/step_evaluator.go b/pkg/logql/step_evaluator.go index 3831c8babdf27..955f9e2b97f86 100644 --- a/pkg/logql/step_evaluator.go +++ b/pkg/logql/step_evaluator.go @@ -6,6 +6,7 @@ import ( type StepResult interface { SampleVector() promql.Vector + QuantileSketchVec() ProbabilisticQuantileVector } type SampleVector promql.Vector @@ -16,6 +17,10 @@ func (p SampleVector) SampleVector() promql.Vector { return promql.Vector(p) } +func (p SampleVector) QuantileSketchVec() ProbabilisticQuantileVector { + return ProbabilisticQuantileVector{} +} + // StepEvaluator evaluate a single step of a query. 
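+// Each call to Next yields a StepResult, which exposes the step's data either
+// as a plain promql sample vector (SampleVector) or, for sharded quantile
+// queries, as quantile sketches (QuantileSketchVec).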
type StepEvaluator interface { // while Next returns a promql.Value, the only acceptable types are Scalar and Vector. diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 95009df3a4689..e110b37236c2f 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -37,17 +37,23 @@ type Expr interface { func Clone[T Expr](e T) (T, error) { var empty T - copied, err := ParseExpr(e.String()) - if err != nil { - return empty, err - } - cast, ok := copied.(T) + v := &cloneVisitor{} + e.Accept(v) + cast, ok := v.cloned.(T) if !ok { - return empty, fmt.Errorf("unpexpected type of cloned expression: want %T, got %T", empty, copied) + return empty, fmt.Errorf("unexpected type of cloned expression: want %T, got %T", empty, v.cloned) } return cast, nil } +func MustClone[T Expr](e T) T { + copied, err := Clone[T](e) + if err != nil { + panic(err) + } + return copied +} + // implicit holds default implementations type implicit struct{} @@ -307,11 +313,12 @@ func (e *PipelineExpr) HasFilter() bool { } type LineFilterExpr struct { - Left *LineFilterExpr - Or *LineFilterExpr - Ty labels.MatchType - Match string - Op string + Left *LineFilterExpr + Or *LineFilterExpr + IsOrChild bool + Ty labels.MatchType + Match string + Op string implicit } @@ -328,6 +335,7 @@ func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr { if left.Ty == labels.MatchEqual || left.Ty == labels.MatchRegexp { left.Or = right + right.IsOrChild = true return left } @@ -380,52 +388,66 @@ func (e *LineFilterExpr) String() string { sb.WriteString(e.Left.String()) sb.WriteString(" ") } - switch e.Ty { - case labels.MatchRegexp: - sb.WriteString("|~") - case labels.MatchNotRegexp: - sb.WriteString("!~") - case labels.MatchEqual: - sb.WriteString("|=") - case labels.MatchNotEqual: - sb.WriteString("!=") + + if !e.IsOrChild { // Only write the type when we're not chaining "or" filters + switch e.Ty { + case labels.MatchRegexp: + sb.WriteString("|~") + case labels.MatchNotRegexp: + sb.WriteString("!~") + case labels.MatchEqual: + sb.WriteString("|=") + case labels.MatchNotEqual: + sb.WriteString("!=") + } + sb.WriteString(" ") } - sb.WriteString(" ") + if e.Op == "" { sb.WriteString(strconv.Quote(e.Match)) - return sb.String() + } else { + sb.WriteString(e.Op) + sb.WriteString("(") + sb.WriteString(strconv.Quote(e.Match)) + sb.WriteString(")") } - sb.WriteString(e.Op) - sb.WriteString("(") - sb.WriteString(strconv.Quote(e.Match)) - sb.WriteString(")") + + if e.Or != nil { + sb.WriteString(" or ") + // This is dirty but removes the leading MatchType from the or expression. 
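+		// newOrLineFilter marks the right-hand side with IsOrChild, so its
+		// String() omits the match-type prefix that was already written above.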
+ sb.WriteString(e.Or.String()) + } + return sb.String() } func (e *LineFilterExpr) Filter() (log.Filterer, error) { acc := make([]log.Filterer, 0) for curr := e; curr != nil; curr = curr.Left { - switch curr.Op { - case OpFilterIP: - var err error - next, err := log.NewIPLineFilter(curr.Match, curr.Ty) + var next log.Filterer + var err error + if curr.Or != nil { + next, err = newOrFilter(curr) if err != nil { return nil, err } acc = append(acc, next) - default: - var next log.Filterer - var err error - if curr.Or != nil { - next, err = newOrFilter(curr) - } else { + } else { + switch curr.Op { + case OpFilterIP: + next, err := log.NewIPLineFilter(curr.Match, curr.Ty) + if err != nil { + return nil, err + } + acc = append(acc, next) + default: next, err = log.NewFilter(curr.Match, curr.Ty) - } - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - acc = append(acc, next) + acc = append(acc, next) + } } } @@ -1140,6 +1162,11 @@ const ( // parser flags OpStrict = "--strict" OpKeepEmpty = "--keep-empty" + + // internal expressions not represented in LogQL. These are used to + // evaluate expressions differently resulting in intermediate formats + // that are not consumable by LogQL clients but are used for sharding. + OpRangeTypeQuantileSketch = "__quantile_sketch_over_time__" ) func IsComparisonOperator(op string) bool { @@ -1188,7 +1215,7 @@ type RangeAggregationExpr struct { func newRangeAggregationExpr(left *LogRange, operation string, gr *Grouping, stringParams *string) SampleExpr { var params *float64 if stringParams != nil { - if operation != OpRangeTypeQuantile { + if operation != OpRangeTypeQuantile && operation != OpRangeTypeQuantileSketch { return &RangeAggregationExpr{err: logqlmodel.NewParseError(fmt.Sprintf("parameter %s not supported for operation %s", *stringParams, operation), 0, 0)} } var err error @@ -1243,7 +1270,7 @@ func (e *RangeAggregationExpr) MatcherGroups() ([]MatcherRange, error) { func (e RangeAggregationExpr) validate() error { if e.Grouping != nil { switch e.Operation { - case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, OpRangeTypeLast: + case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeQuantileSketch, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, OpRangeTypeLast: default: return fmt.Errorf("grouping not allowed for %s aggregation", e.Operation) } @@ -1252,7 +1279,7 @@ func (e RangeAggregationExpr) validate() error { switch e.Operation { case OpRangeTypeAvg, OpRangeTypeSum, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeRate, OpRangeTypeRateCounter, - OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast: + OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast, OpRangeTypeQuantileSketch: return nil default: return fmt.Errorf("invalid aggregation %s with unwrap", e.Operation) @@ -2112,6 +2139,7 @@ var shardableOps = map[string]bool{ OpRangeTypeSum: true, OpRangeTypeMax: true, OpRangeTypeMin: true, + OpRangeTypeQuantile: true, // binops - arith OpTypeAdd: true, diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index e1570e07e8c1f..8767651eaae75 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -404,6 +404,20 @@ func Test_FilterMatcher(t *testing.T) { }, []linecheck{{"foo", false}, {"bar", false}, {"none", true}}, }, + { + `{app="foo"} |= ip("127.0.0.1") or "foo"`, + []*labels.Matcher{ + 
mustNewMatcher(labels.MatchEqual, "app", "foo"), + }, + []linecheck{{"foo", true}, {"bar", false}, {"127.0.0.2", false}, {"127.0.0.1", true}}, + }, + { + `{app="foo"} != ip("127.0.0.1") or "foo"`, + []*labels.Matcher{ + mustNewMatcher(labels.MatchEqual, "app", "foo"), + }, + []linecheck{{"foo", false}, {"bar", true}, {"127.0.0.2", true}, {"127.0.0.1", false}}, + }, } { tt := tt t.Run(tt.q, func(t *testing.T) { @@ -474,6 +488,42 @@ func TestStringer(t *testing.T) { in: `0 > count_over_time({foo="bar"}[1m])`, out: `(0 > count_over_time({foo="bar"}[1m]))`, }, + { + in: `{app="foo"} |= "foo" or "bar"`, + out: `{app="foo"} |= "foo" or "bar"`, + }, + { + in: `{app="foo"} |~ "foo" or "bar" or "baz"`, + out: `{app="foo"} |~ "foo" or "bar" or "baz"`, + }, + { + in: `{app="foo"} |= ip("127.0.0.1") or "foo"`, + out: `{app="foo"} |= ip("127.0.0.1") or "foo"`, + }, + { + in: `{app="foo"} |= "foo" or ip("127.0.0.1")`, + out: `{app="foo"} |= "foo" or ip("127.0.0.1")`, + }, + { + in: `{app="foo"} |~ ip("127.0.0.1") or "foo"`, + out: `{app="foo"} |~ ip("127.0.0.1") or "foo"`, + }, + { // !(A || B) == !A && !B + in: `{app="foo"} != "foo" or "bar"`, + out: `{app="foo"} != "foo" != "bar"`, + }, + { + in: `{app="foo"} !~ "foo" or "bar"`, + out: `{app="foo"} !~ "foo" !~ "bar"`, + }, + { + in: `{app="foo"} != ip("127.0.0.1") or "foo"`, + out: `{app="foo"} != ip("127.0.0.1") != "foo"`, + }, + { + in: `{app="foo"} !~ ip("127.0.0.1") or "foo"`, + out: `{app="foo"} !~ ip("127.0.0.1") !~ "foo"`, + }, } { t.Run(tc.in, func(t *testing.T) { expr, err := ParseExpr(tc.in) diff --git a/pkg/logql/syntax/clone.go b/pkg/logql/syntax/clone.go new file mode 100644 index 0000000000000..07aeb141ea78c --- /dev/null +++ b/pkg/logql/syntax/clone.go @@ -0,0 +1,298 @@ +package syntax + +import ( + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/pkg/logql/log" +) + +type cloneVisitor struct { + cloned Expr +} + +var _ RootVisitor = &cloneVisitor{} + +func cloneGrouping(g *Grouping) *Grouping { + copied := &Grouping{ + Without: g.Without, + } + if g.Groups != nil { + copied.Groups = make([]string, len(g.Groups)) + copy(copied.Groups, g.Groups) + } + return copied +} + +func cloneVectorMatching(v *VectorMatching) *VectorMatching { + copied := *v + copy(copied.Include, v.Include) + copy(copied.MatchingLabels, v.MatchingLabels) + + return &copied +} + +func (v *cloneVisitor) VisitBinOp(e *BinOpExpr) { + lhs := MustClone[SampleExpr](e.SampleExpr) + rhs := MustClone[SampleExpr](e.RHS) + copied := &BinOpExpr{ + SampleExpr: lhs, + RHS: rhs, + Op: e.Op, + } + + if e.Opts != nil { + copied.Opts = &BinOpOptions{ + ReturnBool: e.Opts.ReturnBool, + VectorMatching: cloneVectorMatching(e.Opts.VectorMatching), + } + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitVectorAggregation(e *VectorAggregationExpr) { + copied := &VectorAggregationExpr{ + Left: MustClone[SampleExpr](e.Left), + Params: e.Params, + Operation: e.Operation, + } + + if e.Grouping != nil { + copied.Grouping = cloneGrouping(e.Grouping) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitRangeAggregation(e *RangeAggregationExpr) { + copied := &RangeAggregationExpr{ + Left: MustClone[*LogRange](e.Left), + Operation: e.Operation, + } + + if e.Grouping != nil { + copied.Grouping = cloneGrouping(e.Grouping) + } + + if e.Params != nil { + tmp := *e.Params + copied.Params = &tmp + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitLabelReplace(e *LabelReplaceExpr) { + left := MustClone[SampleExpr](e.Left) + v.cloned = 
mustNewLabelReplaceExpr(left, e.Dst, e.Replacement, e.Src, e.Regex) +} + +func (v *cloneVisitor) VisitLiteral(e *LiteralExpr) { + v.cloned = &LiteralExpr{Val: e.Val} +} + +func (v *cloneVisitor) VisitVector(e *VectorExpr) { + v.cloned = &VectorExpr{Val: e.Val} +} + +func (v *cloneVisitor) VisitLogRange(e *LogRange) { + copied := &LogRange{ + Left: MustClone[LogSelectorExpr](e.Left), + Interval: e.Interval, + Offset: e.Offset, + } + if e.Unwrap != nil { + copied.Unwrap = &UnwrapExpr{ + Identifier: e.Unwrap.Identifier, + Operation: e.Unwrap.Operation, + } + if e.Unwrap.PostFilters != nil { + copied.Unwrap.PostFilters = make([]log.LabelFilterer, len(e.Unwrap.PostFilters)) + for i, f := range e.Unwrap.PostFilters { + copied.Unwrap.PostFilters[i] = cloneLabelFilterer(f) + } + } + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitMatchers(e *MatchersExpr) { + copied := &MatchersExpr{ + Mts: make([]*labels.Matcher, len(e.Mts)), + } + for i, m := range e.Mts { + copied.Mts[i] = labels.MustNewMatcher(m.Type, m.Name, m.Value) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitPipeline(e *PipelineExpr) { + copied := &PipelineExpr{ + Left: MustClone[*MatchersExpr](e.Left), + MultiStages: make(MultiStageExpr, len(e.MultiStages)), + } + for i, s := range e.MultiStages { + copied.MultiStages[i] = MustClone[StageExpr](s) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitDecolorize(*DecolorizeExpr) { + v.cloned = &DecolorizeExpr{} +} + +func (v *cloneVisitor) VisitDropLabels(e *DropLabelsExpr) { + copied := &DropLabelsExpr{ + dropLabels: make([]log.DropLabel, len(e.dropLabels)), + } + for i, l := range e.dropLabels { + var matcher *labels.Matcher + if l.Matcher != nil { + matcher = labels.MustNewMatcher(l.Matcher.Type, l.Matcher.Name, l.Matcher.Value) + } + copied.dropLabels[i] = log.NewDropLabel(matcher, l.Name) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitJSONExpressionParser(e *JSONExpressionParser) { + copied := &JSONExpressionParser{ + Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)), + } + copy(copied.Expressions, e.Expressions) + + v.cloned = copied +} + +func (v *cloneVisitor) VisitKeepLabel(e *KeepLabelsExpr) { + copied := &KeepLabelsExpr{ + keepLabels: make([]log.KeepLabel, len(e.keepLabels)), + } + for i, k := range e.keepLabels { + copied.keepLabels[i] = log.KeepLabel{ + Name: k.Name, + } + if k.Matcher != nil { + copied.keepLabels[i].Matcher = labels.MustNewMatcher(k.Matcher.Type, k.Matcher.Name, k.Matcher.Value) + } + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitLabelFilter(e *LabelFilterExpr) { + v.cloned = &LabelFilterExpr{ + LabelFilterer: cloneLabelFilterer(e.LabelFilterer), + } +} + +func cloneLabelFilterer(filter log.LabelFilterer) log.LabelFilterer { + switch concrete := filter.(type) { + case *log.BinaryLabelFilter: + return &log.BinaryLabelFilter{ + Left: cloneLabelFilterer(concrete.Left), + Right: cloneLabelFilterer(concrete.Right), + And: concrete.And, + } + case *log.NoopLabelFilter: + copied := &log.NoopLabelFilter{} + if concrete.Matcher != nil { + copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value) + } + + return copied + case *log.BytesLabelFilter: + return &log.BytesLabelFilter{ + Name: concrete.Name, + Value: concrete.Value, + Type: concrete.Type, + } + case *log.DurationLabelFilter: + return &log.DurationLabelFilter{ + Name: concrete.Name, + Value: concrete.Value, + Type: concrete.Type, + } + case *log.NumericLabelFilter: + return &log.NumericLabelFilter{ + Name: concrete.Name, 
+ Value: concrete.Value, + Type: concrete.Type, + } + case *log.StringLabelFilter: + copied := &log.StringLabelFilter{} + if concrete.Matcher != nil { + copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value) + } + return copied + case *log.LineFilterLabelFilter: + copied := &log.LineFilterLabelFilter{} + if concrete.Matcher != nil { + copied.Matcher = mustNewMatcher(concrete.Type, concrete.Name, concrete.Value) + } + return copied + case *log.IPLabelFilter: + return log.NewIPLabelFilter(concrete.Pattern, concrete.Label, concrete.Ty) + } + return nil +} + +func (v *cloneVisitor) VisitLabelFmt(e *LabelFmtExpr) { + copied := &LabelFmtExpr{ + Formats: make([]log.LabelFmt, len(e.Formats)), + } + copy(copied.Formats, e.Formats) + v.cloned = copied +} + +func (v *cloneVisitor) VisitLabelParser(e *LabelParserExpr) { + v.cloned = &LabelParserExpr{ + Op: e.Op, + Param: e.Param, + } +} + +func (v *cloneVisitor) VisitLineFilter(e *LineFilterExpr) { + copied := &LineFilterExpr{ + Ty: e.Ty, + Match: e.Match, + Op: e.Op, + IsOrChild: e.IsOrChild, + } + + if e.Left != nil { + copied.Left = MustClone[*LineFilterExpr](e.Left) + } + + if e.Or != nil { + copied.Or = MustClone[*LineFilterExpr](e.Or) + } + + v.cloned = copied +} + +func (v *cloneVisitor) VisitLineFmt(e *LineFmtExpr) { + v.cloned = &LineFmtExpr{Value: e.Value} +} + +func (v *cloneVisitor) VisitLogfmtExpressionParser(e *LogfmtExpressionParser) { + copied := &LogfmtExpressionParser{ + Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)), + Strict: e.Strict, + KeepEmpty: e.KeepEmpty, + } + copy(copied.Expressions, e.Expressions) + + v.cloned = copied +} + +func (v *cloneVisitor) VisitLogfmtParser(e *LogfmtParserExpr) { + v.cloned = &LogfmtParserExpr{ + Strict: e.Strict, + KeepEmpty: e.KeepEmpty, + } +} diff --git a/pkg/logql/syntax/clone_test.go b/pkg/logql/syntax/clone_test.go new file mode 100644 index 0000000000000..01364919a6780 --- /dev/null +++ b/pkg/logql/syntax/clone_test.go @@ -0,0 +1,114 @@ +package syntax + +import ( + "strings" + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logql/log" +) + +func TestClone(t *testing.T) { + tests := map[string]struct { + query string + }{ + "simple matchers": { + query: `{env="prod", app=~"loki.*"}`, + }, + "simple aggregation": { + query: `count_over_time({env="prod", app=~"loki.*"}[5m])`, + }, + "simple aggregation with unwrap": { + query: `sum_over_time({env="prod", app=~"loki.*"} | unwrap bytes[5m])`, + }, + "bin op": { + query: `(count_over_time({env="prod", app=~"loki.*"}[5m]) >= 0)`, + }, + "label filter": { + query: `{app="foo"} |= "bar" | json | ( latency>=250ms or ( status_code<500 , status_code>200 ) )`, + }, + "line filter": { + query: `{app="foo"} |= "bar" | json |= "500" or "200"`, + }, + "drop label": { + query: `{app="foo"} |= "bar" | json | drop latency, status_code="200"`, + }, + "keep label": { + query: `{app="foo"} |= "bar" | json | keep latency, status_code="200"`, + }, + "regexp": { + query: `{env="prod", app=~"loki.*"} |~ ".*foo.*"`, + }, + "vector matching": { + query: `(sum by (cluster)(rate({foo="bar"}[5m])) / ignoring (cluster) count(rate({foo="bar"}[5m])))`, + }, + "sum over or vector": { + query: `(sum(count_over_time({foo="bar"}[5m])) or vector(1.000000))`, + }, + "label replace": { + query: `label_replace(vector(0.000000),"foo","bar","","")`, + }, + "filters with bytes": { + query: `{app="foo"} |= "bar" | json | ( status_code <500 or ( status_code>200 
, size>=2.5KiB ) )`, + }, + "post filter": { + query: `quantile_over_time(0.99998,{app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200) + | line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo + | __error__ !~".+"[5m]) by (namespace,instance)`, + }, + "multiple post filters": { + query: `rate({app="foo"} | json | unwrap foo | latency >= 250ms or bytes > 42B or ( status_code < 500 and status_code > 200) or source = ip("") and user = "me" [1m])`, + }, + "true filter": { + query: `{ foo = "bar" } | foo =~".*"`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + + expr, err := ParseExpr(test.query) + require.NoError(t, err) + + actual, err := Clone[Expr](expr) + require.NoError(t, err) + + require.Equal(t, expr.Pretty(0), actual.Pretty(0)) + }) + } +} + +func TestCloneStringLabelFilter(t *testing.T) { + expr := newPipelineExpr( + newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), + MultiStageExpr{ + newLogfmtParserExpr(nil), + newLabelFilterExpr(&log.StringLabelFilter{Matcher: labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")}), + }, + ) + actual, err := Clone[Expr](expr) + require.NoError(t, err) + + require.Equal(t, expr.Pretty(0), actual.Pretty(0)) +} + +func TestCloneParseTestCases(t *testing.T) { + for _, tc := range ParseTestCases { + if tc.err == nil { + t.Run(tc.in, func(t *testing.T) { + ast, err := ParseExpr(tc.in) + require.NoError(t, err) + if strings.Contains(tc.in, "KiB") { + t.Skipf("Byte roundtrip conversion is broken. '%s' vs '%s'", tc.in, ast.String()) + } + + actual, err := Clone[Expr](ast) + require.NoError(t, err) + + require.Equal(t, ast.Pretty(0), actual.Pretty(0)) + }) + } + } +} diff --git a/pkg/logqlmodel/logqlmodel.go b/pkg/logqlmodel/logqlmodel.go index da9d7f083f22e..8ba0e198c403a 100644 --- a/pkg/logqlmodel/logqlmodel.go +++ b/pkg/logqlmodel/logqlmodel.go @@ -5,8 +5,8 @@ import ( "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" - "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/push" ) // ValueTypeStreams promql.ValueType for log streams @@ -23,7 +23,7 @@ type Result struct { } // Streams is promql.Value -type Streams []logproto.Stream +type Streams []push.Stream func (streams Streams) Len() int { return len(streams) } func (streams Streams) Swap(i, j int) { streams[i], streams[j] = streams[j], streams[i] } diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go index 339d934c10eb5..597da62805bb6 100644 --- a/pkg/logqlmodel/stats/context.go +++ b/pkg/logqlmodel/stats/context.go @@ -61,6 +61,7 @@ const ( StatsResultCache = "stats-result" VolumeResultCache = "volume-result" WriteDedupeCache = "write-dedupe" + BloomFilterCache = "bloom-filter" ) // NewContext creates a new statistics context diff --git a/pkg/loki/config_compat.go b/pkg/loki/config_compat.go index cd15b05f2da7e..1e4f800c46476 100644 --- a/pkg/loki/config_compat.go +++ b/pkg/loki/config_compat.go @@ -1,15 +1,18 @@ package loki import ( + "errors" "fmt" "github.com/grafana/loki/pkg/ingester/index" + frontend "github.com/grafana/loki/pkg/lokifrontend/frontend/v2" "github.com/grafana/loki/pkg/storage/config" ) func ValidateConfigCompatibility(c Config) error { for _, fn := range []func(Config) error{ ensureInvertedIndexShardingCompatibility, + ensureProtobufEncodingForAggregationSharding, } { if err := fn(c); err != nil { 
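+			// Validation is fail-fast: the first incompatible setting is returned.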
return err @@ -40,3 +43,10 @@ func ensureInvertedIndexShardingCompatibility(c Config) error { } return nil } + +func ensureProtobufEncodingForAggregationSharding(c Config) error { + if len(c.QueryRange.ShardAggregations) > 0 && c.Frontend.FrontendV2.Encoding != frontend.EncodingProtobuf { + return errors.New("shard_aggregation requires frontend.encoding=protobuf") + } + return nil +} diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index bf450f852be5a..e7848ef701a25 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -530,6 +530,7 @@ func (t *Loki) initQuerier() (services.Service, error) { internalHandler := queryrangebase.MergeMiddlewares(internalMiddlewares...).Wrap(handler) svc, err := querier.InitWorkerService( + logger, querierWorkerServiceConfig, prometheus.DefaultRegisterer, internalHandler, @@ -801,7 +802,9 @@ func (t *Loki) initIngesterQuerier() (_ services.Service, err error) { // Placeholder limits type to pass to cortex frontend type disabledShuffleShardingLimits struct{} -func (disabledShuffleShardingLimits) MaxQueriersPerUser(_ string) int { return 0 } +func (disabledShuffleShardingLimits) MaxQueriersPerUser(_ string) uint { return 0 } + +func (disabledShuffleShardingLimits) MaxQueryCapacity(_ string) float64 { return 0 } func (t *Loki) initQueryFrontendMiddleware() (_ services.Service, err error) { level.Debug(util_log.Logger).Log("msg", "initializing query frontend tripperware") @@ -1332,7 +1335,15 @@ func (t *Loki) initIndexGateway() (services.Service, error) { var bloomQuerier indexgateway.BloomQuerier if t.Cfg.BloomGateway.Enabled { - bloomGatewayClient, err := bloomgateway.NewGatewayClient(t.Cfg.BloomGateway.Client, t.Overrides, prometheus.DefaultRegisterer, logger, t.Cfg.MetricsNamespace) + bloomGatewayClient, err := bloomgateway.NewGatewayClient( + t.Cfg.BloomGateway.Client, + t.Overrides, + prometheus.DefaultRegisterer, + logger, + t.Cfg.MetricsNamespace, + t.cacheGenerationLoader, + t.Cfg.CompactorConfig.RetentionEnabled, + ) if err != nil { return nil, err } diff --git a/pkg/lokifrontend/frontend/v1/frontend.go b/pkg/lokifrontend/frontend/v1/frontend.go index ff32cbf7b98f0..cf17b62b03186 100644 --- a/pkg/lokifrontend/frontend/v1/frontend.go +++ b/pkg/lokifrontend/frontend/v1/frontend.go @@ -21,9 +21,9 @@ import ( "github.com/grafana/loki/pkg/lokifrontend/frontend/v1/frontendv1pb" "github.com/grafana/loki/pkg/querier/stats" "github.com/grafana/loki/pkg/queue" + "github.com/grafana/loki/pkg/scheduler/limits" "github.com/grafana/loki/pkg/util" lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc" - "github.com/grafana/loki/pkg/util/validation" ) var errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests") @@ -42,7 +42,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { type Limits interface { // Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. - MaxQueriersPerUser(user string) int + MaxQueriersPerUser(user string) uint + + // MaxQueryCapacity returns how much of the available query capacity can be used by this user. + MaxQueryCapacity(user string) float64 } // Frontend queues HTTP requests, dispatches them to backends, and handles retries @@ -80,12 +83,12 @@ type request struct { } // New creates a new frontend. Frontend implements service, and must be started and stopped. 
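+// New wires the provided Limits into the request queue via limits.NewQueueLimits,
+// so per-tenant querier and query-capacity limits are handled by the queue
+// rather than aggregated in queueRequest.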
-func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer, metricsNamespace string) (*Frontend, error) { +func New(cfg Config, frontendLimits Limits, log log.Logger, registerer prometheus.Registerer, metricsNamespace string) (*Frontend, error) { queueMetrics := queue.NewMetrics(registerer, metricsNamespace, "query_frontend") f := &Frontend{ cfg: cfg, log: log, - limits: limits, + limits: frontendLimits, queueMetrics: queueMetrics, queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ Namespace: metricsNamespace, @@ -95,7 +98,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist }), } - f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, queueMetrics) + f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, limits.NewQueueLimits(frontendLimits), queueMetrics) f.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics) var err error @@ -312,13 +315,10 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error { req.enqueueTime = now req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued") - // aggregate the max queriers limit in the case of a multi tenant query - maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, f.limits.MaxQueriersPerUser) - joinedTenantID := tenant.JoinTenantIDs(tenantIDs) f.activeUsers.UpdateUserTimestamp(joinedTenantID, now) - err = f.requestQueue.Enqueue(joinedTenantID, nil, req, maxQueriers, nil) + err = f.requestQueue.Enqueue(joinedTenantID, nil, req, nil) if err == queue.ErrTooManyRequests { return errTooManyRequest } diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go index f715d3e8f5fd0..a10a55b37984f 100644 --- a/pkg/lokifrontend/frontend/v1/frontend_test.go +++ b/pkg/lokifrontend/frontend/v1/frontend_test.go @@ -35,6 +35,7 @@ import ( "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" querier_worker "github.com/grafana/loki/pkg/querier/worker" "github.com/grafana/loki/pkg/queue" + "github.com/grafana/loki/pkg/scheduler/limits" "github.com/grafana/loki/pkg/util/constants" ) @@ -135,7 +136,7 @@ func TestFrontendCheckReady(t *testing.T) { qm := queue.NewMetrics(nil, constants.Loki, "query_frontend") f := &Frontend{ log: log.NewNopLogger(), - requestQueue: queue.NewRequestQueue(5, 0, qm), + requestQueue: queue.NewRequestQueue(5, 0, limits.NewQueueLimits(nil), qm), } for i := 0; i < tt.connectedClients; i++ { f.requestQueue.RegisterConsumerConnection("test") @@ -243,7 +244,7 @@ func testFrontend(t *testing.T, config Config, handler queryrangebase.Handler, t httpListen, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) - v1, err := New(config, limits{}, logger, reg, constants.Loki) + v1, err := New(config, mockLimits{}, logger, reg, constants.Loki) require.NoError(t, err) require.NotNil(t, v1) require.NoError(t, services.StartAndAwaitRunning(context.Background(), v1)) @@ -293,10 +294,15 @@ func defaultFrontendConfig() Config { return config } -type limits struct { - queriers int +type mockLimits struct { + queriers uint + queryCapacity float64 } -func (l limits) MaxQueriersPerUser(_ string) int { +func (l mockLimits) MaxQueriersPerUser(_ string) uint { return l.queriers } + +func (l mockLimits) MaxQueryCapacity(_ string) float64 { + return l.queryCapacity +} diff --git a/pkg/lokifrontend/frontend/v1/queue_test.go 
b/pkg/lokifrontend/frontend/v1/queue_test.go index efc04e338981f..a6f380afd492d 100644 --- a/pkg/lokifrontend/frontend/v1/queue_test.go +++ b/pkg/lokifrontend/frontend/v1/queue_test.go @@ -24,7 +24,7 @@ import ( func setupFrontend(t *testing.T, config Config) *Frontend { logger := log.NewNopLogger() - frontend, err := New(config, limits{queriers: 3}, logger, nil, constants.Loki) + frontend, err := New(config, mockLimits{queriers: 3}, logger, nil, constants.Loki) require.NoError(t, err) t.Cleanup(func() { diff --git a/pkg/lokifrontend/frontend/v2/frontend_test.go b/pkg/lokifrontend/frontend/v2/frontend_test.go index 3ab1028e96138..9a87c5ff1c7cc 100644 --- a/pkg/lokifrontend/frontend/v2/frontend_test.go +++ b/pkg/lokifrontend/frontend/v2/frontend_test.go @@ -19,7 +19,9 @@ import ( "go.uber.org/atomic" "google.golang.org/grpc" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/lokifrontend/frontend/v2/frontendv2pb" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange" "github.com/grafana/loki/pkg/querier/stats" "github.com/grafana/loki/pkg/scheduler/schedulerpb" @@ -29,7 +31,7 @@ import ( const testFrontendWorkerConcurrency = 5 -func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend) (*Frontend, *mockScheduler) { +func setupFrontend(t *testing.T, cfg Config, schedulerReplyFunc func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend) (*Frontend, *mockScheduler) { l, err := net.Listen("tcp", "") require.NoError(t, err) @@ -41,8 +43,6 @@ func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *sched grpcPort, err := strconv.Atoi(p) require.NoError(t, err) - cfg := Config{} - flagext.DefaultValues(&cfg) cfg.SchedulerAddress = l.Addr().String() cfg.WorkerConcurrency = testFrontendWorkerConcurrency cfg.Addr = h @@ -102,7 +102,9 @@ func TestFrontendBasicWorkflow(t *testing.T) { userID = "test" ) - f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + cfg := Config{} + flagext.DefaultValues(&cfg) + f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { // We cannot call QueryResult directly, as Frontend is not yet waiting for the response. // It first needs to be told that enqueuing has succeeded. go sendResponseWithDelay(f, 100*time.Millisecond, userID, msg.QueryID, &httpgrpc.HTTPResponse{ @@ -119,6 +121,41 @@ func TestFrontendBasicWorkflow(t *testing.T) { require.Equal(t, []byte(body), resp.Body) } +func TestFrontendBasicWorkflowProto(t *testing.T) { + const ( + userID = "test" + ) + + ctx := user.InjectOrgID(context.Background(), userID) + + req := &queryrange.LokiRequest{ + Query: `{foo="bar"} | json`, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"} | json`), + }, + } + + resp, err := queryrange.NewEmptyResponse(req) + require.NoError(t, err) + httpReq := &httpgrpc.HTTPRequest{Url: "/loki/api/v1/query_range"} + httpResp, err := queryrange.DefaultCodec.EncodeHTTPGrpcResponse(ctx, httpReq, resp) + require.NoError(t, err) + + cfg := Config{} + flagext.DefaultValues(&cfg) + cfg.Encoding = EncodingProtobuf + f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + // We cannot call QueryResult directly, as Frontend is not yet waiting for the response. 
+ // It first needs to be told that enqueuing has succeeded. + go sendResponseWithDelay(f, 100*time.Millisecond, userID, msg.QueryID, httpResp) + + return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK} + }) + actualResp, err := f.Do(ctx, req) + require.NoError(t, err) + require.Equal(t, resp.(*queryrange.LokiResponse).Data, actualResp.(*queryrange.LokiResponse).Data) +} + func TestFrontendRetryEnqueue(t *testing.T) { // Frontend uses worker concurrency to compute number of retries. We use one less failure. failures := atomic.NewInt64(testFrontendWorkerConcurrency - 1) @@ -127,7 +164,9 @@ func TestFrontendRetryEnqueue(t *testing.T) { userID = "test" ) - f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + cfg := Config{} + flagext.DefaultValues(&cfg) + f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { fail := failures.Dec() if fail >= 0 { return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN} @@ -145,7 +184,9 @@ func TestFrontendRetryEnqueue(t *testing.T) { } func TestFrontendEnqueueFailure(t *testing.T) { - f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + cfg := Config{} + flagext.DefaultValues(&cfg) + f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN} }) @@ -155,7 +196,9 @@ func TestFrontendEnqueueFailure(t *testing.T) { } func TestFrontendCancellation(t *testing.T) { - f, ms := setupFrontend(t, nil) + cfg := Config{} + flagext.DefaultValues(&cfg) + f, ms := setupFrontend(t, cfg, nil) ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() @@ -184,7 +227,9 @@ func TestFrontendCancellation(t *testing.T) { // all the frontend workers thus not reaching the scheduler as well. // Issue: https://github.com/grafana/loki/issues/5132 func TestFrontendWorkerCancellation(t *testing.T) { - f, ms := setupFrontend(t, nil) + cfg := Config{} + flagext.DefaultValues(&cfg) + f, ms := setupFrontend(t, cfg, nil) ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() @@ -219,7 +264,9 @@ func TestFrontendWorkerCancellation(t *testing.T) { } func TestFrontendFailedCancellation(t *testing.T) { - f, ms := setupFrontend(t, nil) + cfg := Config{} + flagext.DefaultValues(&cfg) + f, ms := setupFrontend(t, cfg, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -258,7 +305,9 @@ func TestFrontendFailedCancellation(t *testing.T) { func TestFrontendStoppingWaitsForEmptyInflightRequests(t *testing.T) { delayResponse := 10 * time.Millisecond - f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + cfg := Config{} + flagext.DefaultValues(&cfg) + f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { // We cannot call QueryResult directly, as Frontend is not yet waiting for the response. // It first needs to be told that enqueuing has succeeded. 
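+		// The delayed goroutine below stands in for a querier reporting its result.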
go sendResponseWithDelay(f, 2*delayResponse, "test", msg.QueryID, &httpgrpc.HTTPResponse{ @@ -296,7 +345,9 @@ func TestFrontendStoppingWaitsForEmptyInflightRequests(t *testing.T) { func TestFrontendShuttingDownLetsSubRequestsPass(t *testing.T) { delayResponse := 100 * time.Millisecond - f, _ := setupFrontend(t, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + cfg := Config{} + flagext.DefaultValues(&cfg) + f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { // We cannot call QueryResult directly, as Frontend is not yet waiting for the response. // It first needs to be told that enqueuing has succeeded. go sendResponseWithDelay(f, delayResponse, "test", msg.QueryID, &httpgrpc.HTTPResponse{ diff --git a/pkg/querier/http.go b/pkg/querier/http.go index 1ecde15626ecb..dc29c2f61e04f 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -69,7 +69,7 @@ func NewQuerierAPI(cfg Config, querier Querier, limits Limits, logger log.Logger // RangeQueryHandler is a http.HandlerFunc for range queries and legacy log queries func (q *QuerierAPI) RangeQueryHandler(ctx context.Context, req *queryrange.LokiRequest) (logqlmodel.Result, error) { - if err := q.validateMaxEntriesLimits(ctx, req.Query, req.Limit); err != nil { + if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil { return logqlmodel.Result{}, err } @@ -84,7 +84,7 @@ func (q *QuerierAPI) RangeQueryHandler(ctx context.Context, req *queryrange.Loki // InstantQueryHandler is a http.HandlerFunc for instant queries. func (q *QuerierAPI) InstantQueryHandler(ctx context.Context, req *queryrange.LokiInstantRequest) (logqlmodel.Result, error) { - if err := q.validateMaxEntriesLimits(ctx, req.Query, req.Limit); err != nil { + if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil { return logqlmodel.Result{}, err } @@ -343,17 +343,12 @@ func (q *QuerierAPI) VolumeHandler(ctx context.Context, req *logproto.VolumeRequ return resp, nil } -func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, query string, limit uint32) error { +func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, expr syntax.Expr, limit uint32) error { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - expr, err := syntax.ParseExpr(query) - if err != nil { - return err - } - // entry limit does not apply to metric queries. 
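+	// expr now arrives pre-parsed from the request's query plan; a SampleExpr
+	// denotes a metric query, whereas the entry limit only bounds log results.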
if _, ok := expr.(syntax.SampleExpr); ok { return nil diff --git a/pkg/querier/http_test.go b/pkg/querier/http_test.go index 5b121ad891949..3e7eb5494a9b4 100644 --- a/pkg/querier/http_test.go +++ b/pkg/querier/http_test.go @@ -31,7 +31,14 @@ func TestTailHandler(t *testing.T) { api := NewQuerierAPI(mockQuerierConfig(), nil, limits, log.NewNopLogger()) - req, err := http.NewRequest("GET", "/", nil) + req, err := http.NewRequest("GET", `/`, nil) + require.NoError(t, err) + q := req.URL.Query() + q.Add("query", `{app="loki"}`) + req.URL.RawQuery = q.Encode() + err = req.ParseForm() + require.NoError(t, err) + ctx := user.InjectOrgID(req.Context(), "1|2") req = req.WithContext(ctx) require.NoError(t, err) diff --git a/pkg/querier/multi_tenant_querier.go b/pkg/querier/multi_tenant_querier.go index f4881df48a6d7..1727b4d7d26a7 100644 --- a/pkg/querier/multi_tenant_querier.go +++ b/pkg/querier/multi_tenant_querier.go @@ -2,7 +2,9 @@ package querier import ( "context" + "fmt" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/storage/stores/index/seriesvolume" "github.com/go-kit/log" @@ -53,6 +55,14 @@ func (q *MultiTenantQuerier) SelectLogs(ctx context.Context, params logql.Select matchedTenants, filteredMatchers := filterValuesByMatchers(defaultTenantLabel, tenantIDs, selector.Matchers()...) params.Selector = replaceMatchers(selector, filteredMatchers).String() + parsed, err := syntax.ParseLogSelector(params.Selector, true) + if err != nil { + return nil, fmt.Errorf("log selector is invalid after matcher update: %w", err) + } + params.Plan = &plan.QueryPlan{ + AST: parsed, + } + iters := make([]iter.EntryIterator, len(matchedTenants)) i := 0 for id := range matchedTenants { diff --git a/pkg/querier/multi_tenant_querier_test.go b/pkg/querier/multi_tenant_querier_test.go index 0a74fe957677b..b503f59e31964 100644 --- a/pkg/querier/multi_tenant_querier_test.go +++ b/pkg/querier/multi_tenant_querier_test.go @@ -21,6 +21,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/querier/plan" ) func TestMultiTenantQuerier_SelectLogs(t *testing.T) { @@ -90,6 +91,9 @@ func TestMultiTenantQuerier_SelectLogs(t *testing.T) { Shards: nil, Start: time.Unix(0, 1), End: time.Unix(0, time.Now().UnixNano()), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.selector), + }, }} iter, err := multiTenantQuerier.SelectLogs(ctx, params) require.NoError(t, err) @@ -161,6 +165,9 @@ func TestMultiTenantQuerier_SelectSamples(t *testing.T) { ctx := user.InjectOrgID(context.Background(), tc.orgID) params := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{ Selector: tc.selector, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.selector), + }, }} iter, err := multiTenantQuerier.SelectSamples(ctx, params) require.NoError(t, err) @@ -191,6 +198,9 @@ func TestMultiTenantQuerier_TenantFilter(t *testing.T) { t.Run(tc.selector, func(t *testing.T) { params := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{ Selector: tc.selector, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.selector), + }, }} _, updatedSelector, err := removeTenantSelector(params, []string{}) require.NoError(t, err) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 964d92d58c10a..003354d408534 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -29,6 +29,7 @@ import ( "github.com/grafana/loki/pkg/logql" 
"github.com/grafana/loki/pkg/logql/syntax" querier_limits "github.com/grafana/loki/pkg/querier/limits" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/stores/index/stats" listutil "github.com/grafana/loki/pkg/util" @@ -443,6 +444,16 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques return nil, err } + if req.Plan == nil { + parsed, err := syntax.ParseExpr(req.Query) + if err != nil { + return nil, err + } + req.Plan = &plan.QueryPlan{ + AST: parsed, + } + } + deletes, err := q.deletesForUser(ctx, req.Start, time.Now()) if err != nil { level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err) @@ -456,6 +467,7 @@ func (q *SingleTenantQuerier) Tail(ctx context.Context, req *logproto.TailReques Limit: req.Limit, Direction: logproto.BACKWARD, Deletes: deletes, + Plan: req.Plan, }, } @@ -629,6 +641,15 @@ func (q *SingleTenantQuerier) seriesForMatchers( // seriesForMatcher fetches series from the store for a given matcher func (q *SingleTenantQuerier) seriesForMatcher(ctx context.Context, from, through time.Time, matcher string, shards []string) ([]logproto.SeriesIdentifier, error) { + var parsed syntax.Expr + var err error + if matcher != "" { + parsed, err = syntax.ParseExpr(matcher) + if err != nil { + return nil, err + } + } + ids, err := q.store.SelectSeries(ctx, logql.SelectLogParams{ QueryRequest: &logproto.QueryRequest{ Selector: matcher, @@ -637,6 +658,9 @@ func (q *SingleTenantQuerier) seriesForMatcher(ctx context.Context, from, throug End: through, Direction: logproto.FORWARD, Shards: shards, + Plan: &plan.QueryPlan{ + AST: parsed, + }, }, }) if err != nil { diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 4c8ee491cde61..a7dd3cb792714 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -23,6 +23,8 @@ import ( "github.com/grafana/loki/pkg/ingester/client" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/util/constants" "github.com/grafana/loki/pkg/validation" @@ -84,10 +86,13 @@ func TestQuerier_Label_QueryTimeoutConfigFlag(t *testing.T) { func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) { request := logproto.TailRequest{ - Query: "{type=\"test\"}", + Query: `{type="test"}`, DelayFor: 0, Limit: 10, Start: time.Now(), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{type="test"}`), + }, } store := newStoreMock() @@ -168,11 +173,14 @@ func defaultLimitsTestConfig() validation.Limits { func TestQuerier_validateQueryRequest(t *testing.T) { request := logproto.QueryRequest{ - Selector: "{type=\"test\", fail=\"yes\"} |= \"foo\"", + Selector: `{type="test", fail="yes"} |= "foo"`, Limit: 10, Start: time.Now().Add(-1 * time.Minute), End: time.Now(), Direction: logproto.FORWARD, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`), + }, } store := newStoreMock() @@ -205,7 +213,10 @@ func TestQuerier_validateQueryRequest(t *testing.T) { _, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request}) require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "max streams matchers per query exceeded, matchers-count > limit (2 > 1)"), err) - request.Selector = "{type=\"test\"}" + request.Selector = `{type="test"}` + request.Plan = &plan.QueryPlan{ + AST: 
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index 4c8ee491cde61..a7dd3cb792714 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -23,6 +23,8 @@ import (
 	"github.com/grafana/loki/pkg/ingester/client"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/logql"
+	"github.com/grafana/loki/pkg/logql/syntax"
+	"github.com/grafana/loki/pkg/querier/plan"
 	"github.com/grafana/loki/pkg/storage"
 	"github.com/grafana/loki/pkg/util/constants"
 	"github.com/grafana/loki/pkg/validation"
@@ -84,10 +86,13 @@ func TestQuerier_Label_QueryTimeoutConfigFlag(t *testing.T) {
 
 func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) {
 	request := logproto.TailRequest{
-		Query:    "{type=\"test\"}",
+		Query:    `{type="test"}`,
 		DelayFor: 0,
 		Limit:    10,
 		Start:    time.Now(),
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(`{type="test"}`),
+		},
 	}
 
 	store := newStoreMock()
@@ -168,11 +173,14 @@ func defaultLimitsTestConfig() validation.Limits {
 
 func TestQuerier_validateQueryRequest(t *testing.T) {
 	request := logproto.QueryRequest{
-		Selector:  "{type=\"test\", fail=\"yes\"} |= \"foo\"",
+		Selector:  `{type="test", fail="yes"} |= "foo"`,
 		Limit:     10,
 		Start:     time.Now().Add(-1 * time.Minute),
 		End:       time.Now(),
 		Direction: logproto.FORWARD,
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`),
+		},
 	}
 
 	store := newStoreMock()
@@ -205,7 +213,10 @@ func TestQuerier_validateQueryRequest(t *testing.T) {
 	_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
 	require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "max streams matchers per query exceeded, matchers-count > limit (2 > 1)"), err)
 
-	request.Selector = "{type=\"test\"}"
+	request.Selector = `{type="test"}`
+	request.Plan = &plan.QueryPlan{
+		AST: syntax.MustParseExpr(`{type="test"}`),
+	}
 	_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
 	require.NoError(t, err)
 
@@ -395,6 +406,9 @@ func TestQuerier_IngesterMaxQueryLookback(t *testing.T) {
 				Start:     tc.end.Add(-6 * time.Hour),
 				End:       tc.end,
 				Direction: logproto.FORWARD,
+				Plan: &plan.QueryPlan{
+					AST: syntax.MustParseExpr(`{app="foo"}`),
+				},
 			}
 
 			queryClient := newQueryClientMock()
@@ -442,6 +456,9 @@ func TestQuerier_concurrentTailLimits(t *testing.T) {
 		DelayFor: 0,
 		Limit:    10,
 		Start:    time.Now(),
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr("{type=\"test\"}"),
+		},
 	}
 
 	t.Parallel()
@@ -879,11 +896,14 @@ func TestQuerier_RequestingIngesters(t *testing.T) {
 			do: func(querier *SingleTenantQuerier, start, end time.Time) error {
 				_, err := querier.SelectLogs(ctx, logql.SelectLogParams{
 					QueryRequest: &logproto.QueryRequest{
-						Selector:  "{type=\"test\", fail=\"yes\"} |= \"foo\"",
+						Selector:  `{type="test", fail="yes"} |= "foo"`,
 						Limit:     10,
 						Start:     start,
 						End:       end,
 						Direction: logproto.FORWARD,
+						Plan: &plan.QueryPlan{
+							AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`),
+						},
 					},
 				})
 
@@ -895,9 +915,12 @@ func TestQuerier_RequestingIngesters(t *testing.T) {
 			do: func(querier *SingleTenantQuerier, start, end time.Time) error {
 				_, err := querier.SelectSamples(ctx, logql.SelectSampleParams{
 					SampleQueryRequest: &logproto.SampleQueryRequest{
-						Selector: "count_over_time({foo=\"bar\"}[5m])",
+						Selector: `count_over_time({foo="bar"}[5m])`,
 						Start:    start,
 						End:      end,
+						Plan: &plan.QueryPlan{
+							AST: syntax.MustParseExpr(`count_over_time({foo="bar"}[5m])`),
+						},
 					},
 				})
 				return err
@@ -1204,6 +1227,9 @@ func TestQuerier_SelectLogWithDeletes(t *testing.T) {
 		Start:     time.Unix(0, 300000000),
 		End:       time.Unix(0, 600000000),
 		Direction: logproto.FORWARD,
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(`{type="test"} |= "foo"`),
+		},
 	}
 
 	_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
@@ -1220,6 +1246,9 @@ func TestQuerier_SelectLogWithDeletes(t *testing.T) {
 			{Selector: "2", Start: 400000000, End: 500000000},
 			{Selector: "3", Start: 500000000, End: 700000000},
 		},
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(request.Selector),
+		},
 	}
 
 	require.Contains(t, store.Calls[0].Arguments, logql.SelectLogParams{QueryRequest: expectedRequest})
@@ -1264,6 +1293,9 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) {
 		Selector: `count_over_time({foo="bar"}[5m])`,
 		Start:    time.Unix(0, 300000000),
 		End:      time.Unix(0, 600000000),
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(`count_over_time({foo="bar"}[5m])`),
+		},
 	}
 
 	_, err = q.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: &request})
@@ -1279,6 +1311,9 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) {
 			{Selector: "2", Start: 400000000, End: 500000000},
 			{Selector: "3", Start: 500000000, End: 700000000},
 		},
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(request.Selector),
+		},
 	},
 	}
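// The test updates above all repeat one pattern: any request literal that carries a
// Selector or Query now also carries a Plan parsed from the identical string. A
// hypothetical helper that would condense these fixtures (not present in the diff):
//
//	func mustPlan(q string) *plan.QueryPlan {
//		return &plan.QueryPlan{AST: syntax.MustParseExpr(q)}
//	}
//
// e.g. Plan: mustPlan(`{type="test"}`) in place of the three-line struct literal.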
diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
index b0c56a7439195..524e5bde63810 100644
--- a/pkg/querier/queryrange/codec.go
+++ b/pkg/querier/queryrange/codec.go
@@ -14,6 +14,7 @@ import (
 	strings "strings"
 	"time"
 
+	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
 	"github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
 
 	"github.com/grafana/dskit/httpgrpc"
@@ -62,11 +63,9 @@ func (r *LokiRequest) WithStartEnd(s time.Time, e time.Time) queryrangebase.Requ
 	return &clone
 }
 
-func (r *LokiRequest) WithStartEndTime(s time.Time, e time.Time) *LokiRequest {
-	clone := *r
-	clone.StartTs = s
-	clone.EndTs = e
-	return &clone
+// WithStartEndForCache implements resultscache.Request.
+func (r *LokiRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+	return r.WithStartEnd(s, e).(resultscache.Request)
 }
 
 func (r *LokiRequest) WithQuery(query string) queryrangebase.Request {
@@ -114,6 +113,11 @@ func (r *LokiInstantRequest) WithStartEnd(s time.Time, _ time.Time) queryrangeba
 	return &clone
 }
 
+// WithStartEndForCache implements resultscache.Request.
+func (r *LokiInstantRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+	return r.WithStartEnd(s, e).(resultscache.Request)
+}
+
 func (r *LokiInstantRequest) WithQuery(query string) queryrangebase.Request {
 	clone := *r
 	clone.Query = query
@@ -153,6 +157,11 @@ func (r *LokiSeriesRequest) WithStartEnd(s, e time.Time) queryrangebase.Request
 	return &clone
 }
 
+// WithStartEndForCache implements resultscache.Request.
+func (r *LokiSeriesRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+	return r.WithStartEnd(s, e).(resultscache.Request)
+}
+
 func (r *LokiSeriesRequest) WithQuery(_ string) queryrangebase.Request {
 	clone := *r
 	return &clone
@@ -229,6 +238,11 @@ func (r *LabelRequest) WithStartEnd(s, e time.Time) queryrangebase.Request {
 	return &clone
 }
 
+// WithStartEndForCache implements resultscache.Request.
+func (r *LabelRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request {
+	return r.WithStartEnd(s, e).(resultscache.Request)
+}
+
 func (r *LabelRequest) WithQuery(query string) queryrangebase.Request {
 	clone := *r
 	clone.Query = query
@@ -540,7 +554,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
 			AggregateBy: req.AggregateBy,
 		}, ctx, err
 	default:
-		return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf("unknown request path: %s", r.Url))
+		return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf("unknown request path in HTTP gRPC decode: %s", r.Url))
 	}
 }
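// WithStartEndForCache lets each request type satisfy the narrower resultscache.Request
// interface while reusing the existing WithStartEnd clone logic; the type assertion is
// the whole trick. Sketched in isolation (assumes every WithStartEnd result also
// implements resultscache.Request, which the four implementations above guarantee):
//
//	func forCache(r queryrangebase.Request, s, e time.Time) resultscache.Request {
//		// Panics if a future WithStartEnd stops returning a resultscache.Request,
//		// which is a loud and early failure mode for this kind of refactor.
//		return r.WithStartEnd(s, e).(resultscache.Request)
//	}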
diff --git a/pkg/querier/queryrange/index_stats_cache.go b/pkg/querier/queryrange/index_stats_cache.go
index 4814394fd47ab..a985167456a76 100644
--- a/pkg/querier/queryrange/index_stats_cache.go
+++ b/pkg/querier/queryrange/index_stats_cache.go
@@ -14,6 +14,7 @@ import (
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
 	"github.com/grafana/loki/pkg/storage/chunk/cache"
+	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
 	"github.com/grafana/loki/pkg/util"
 	"github.com/grafana/loki/pkg/util/validation"
 )
@@ -23,7 +24,7 @@ type IndexStatsSplitter struct {
 }
 
 // GenerateCacheKey generates a cache key based on the userID, Request and interval.
-func (i IndexStatsSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
+func (i IndexStatsSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
 	cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r)
 	return fmt.Sprintf("indexStats:%s", cacheKey)
 }
@@ -32,7 +33,7 @@ type IndexStatsExtractor struct{}
 
 // Extract favors the ability to cache over exactness of results. It assumes a constant distribution
 // of log volumes over a range and will extract subsets proportionally.
-func (p IndexStatsExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response {
+func (p IndexStatsExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response {
 	factor := util.GetFactorOfTime(start, end, resStart, resEnd)
 
 	statsRes := res.(*IndexStatsResponse)
@@ -93,7 +94,7 @@ func NewIndexStatsCacheMiddleware(
 	c cache.Cache,
 	cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
 	shouldCache queryrangebase.ShouldCacheFn,
-	parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int,
+	parallelismForReq queryrangebase.ParallelismForReqFn,
 	retentionEnabled bool,
 	transformer UserIDTransformer,
 	metrics *queryrangebase.ResultsCacheMetrics,
diff --git a/pkg/querier/queryrange/index_stats_cache_test.go b/pkg/querier/queryrange/index_stats_cache_test.go
index 72b24757aef5c..c8119c6b9fe25 100644
--- a/pkg/querier/queryrange/index_stats_cache_test.go
+++ b/pkg/querier/queryrange/index_stats_cache_test.go
@@ -15,14 +15,17 @@ import (
 	"github.com/grafana/loki/pkg/logqlmodel/stats"
 	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
 	"github.com/grafana/loki/pkg/storage/chunk/cache"
+	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
 	"github.com/grafana/loki/pkg/util"
 	"github.com/grafana/loki/pkg/util/constants"
 )
 
 func TestIndexStatsCache(t *testing.T) {
 	cfg := queryrangebase.ResultsCacheConfig{
-		CacheConfig: cache.Config{
-			Cache: cache.NewMockCache(),
+		Config: resultscache.Config{
+			CacheConfig: cache.Config{
+				Cache: cache.NewMockCache(),
+			},
 		},
 	}
 	c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
@@ -158,8 +161,10 @@ func TestIndexStatsCache_RecentData(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			cfg := queryrangebase.ResultsCacheConfig{
-				CacheConfig: cache.Config{
-					Cache: cache.NewMockCache(),
+				Config: resultscache.Config{
+					CacheConfig: cache.Config{
+						Cache: cache.NewMockCache(),
+					},
 				},
 			}
 			c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki)
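// Extract scales a cached stats response by the fraction of the cached extent that
// overlaps the requested range. The diff delegates to util.GetFactorOfTime; a
// self-contained sketch of the same overlap arithmetic (illustration only):
//
//	func overlapFactor(start, end, resStart, resEnd int64) float64 {
//		from, through := max(start, resStart), min(end, resEnd)
//		if through <= from || resEnd == resStart {
//			return 0
//		}
//		return float64(through-from) / float64(resEnd-resStart)
//	}
//
// Under the stated constant-distribution assumption, a query overlapping a quarter of
// a cached extent is credited with a quarter of its bytes, chunks and streams.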
diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go
index b6f5c4d51fb33..673c995a600b9 100644
--- a/pkg/querier/queryrange/limits.go
+++ b/pkg/querier/queryrange/limits.go
@@ -14,7 +14,6 @@ import (
 	"github.com/go-kit/log/level"
 	"github.com/grafana/dskit/httpgrpc"
 	"github.com/grafana/dskit/tenant"
-	"github.com/opentracing/opentracing-go"
 	otlog "github.com/opentracing/opentracing-go/log"
 	"github.com/pkg/errors"
 
@@ -28,6 +27,7 @@ import (
 	"github.com/grafana/loki/pkg/logql/syntax"
 	queryrange_limits "github.com/grafana/loki/pkg/querier/queryrange/limits"
 	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
+	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
 	"github.com/grafana/loki/pkg/storage/config"
 	"github.com/grafana/loki/pkg/storage/stores/index/stats"
 	util_log "github.com/grafana/loki/pkg/util/log"
@@ -104,7 +104,7 @@ type cacheKeyLimits struct {
 	transformer UserIDTransformer
 }
 
-func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string {
+func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
 	split := l.QuerySplitDuration(userID)
 
 	var currentInterval int64
@@ -304,7 +304,7 @@ func (q *querySizeLimiter) getBytesReadForRequest(ctx context.Context, r queryra
 }
 
 func (q *querySizeLimiter) getSchemaCfg(r queryrangebase.Request) (config.PeriodConfig, error) {
-	maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(r.GetQuery())
+	maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDurationFromQueryString(r.GetQuery())
 	if err != nil {
 		return config.PeriodConfig{}, errors.New("failed to get range-vector and offset duration: " + err.Error())
 	}
diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go
index b4bff9f96d08d..efc9b030f7f84 100644
--- a/pkg/querier/queryrange/limits_test.go
+++ b/pkg/querier/queryrange/limits_test.go
@@ -17,7 +17,9 @@ import (
 	"gopkg.in/yaml.v2"
 
 	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/logql/syntax"
 	"github.com/grafana/loki/pkg/logqlmodel"
+	"github.com/grafana/loki/pkg/querier/plan"
 	base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
 	"github.com/grafana/loki/pkg/storage/config"
 	"github.com/grafana/loki/pkg/util/constants"
@@ -72,6 +74,9 @@ func Test_seriesLimiter(t *testing.T) {
 		EndTs:     testTime,
 		Direction: logproto.FORWARD,
 		Path:      "/query_range",
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(`rate({app="foo"} |= "foo"[1m])`),
+		},
 	}
 
 	ctx := user.InjectOrgID(context.Background(), "1")
@@ -241,6 +246,9 @@ func Test_MaxQueryLookBack(t *testing.T) {
 		EndTs:     testTime,
 		Direction: logproto.FORWARD,
 		Path:      "/loki/api/v1/query_range",
+		Plan: &plan.QueryPlan{
+			AST: syntax.MustParseExpr(`{app="foo"} |= "foo"`),
+		},
 	}
 
 	ctx := user.InjectOrgID(context.Background(), "1")
@@ -589,6 +597,9 @@ func Test_MaxQuerySize(t *testing.T) {
 				EndTs:     tc.queryEnd,
 				Direction: logproto.FORWARD,
 				Path:      "/query_range",
+				Plan: &plan.QueryPlan{
+					AST: syntax.MustParseExpr(tc.query),
+				},
 			}
 
 			ctx := user.InjectOrgID(context.Background(), "foo")
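// cacheKeyLimits.GenerateCacheKey buckets a request by the tenant's split interval, so
// requests whose start times land in the same window share a results-cache entry. A
// sketch of the bucketing arithmetic for the common split > 0 case (the real method
// also handles split == 0 and the UserIDTransformer, both elided here):
//
//	func intervalBucket(startMs int64, split time.Duration) int64 {
//		return startMs / split.Milliseconds()
//	}
//
// The bucket is then combined with the user ID and query into the final key, which is
// what IndexStatsSplitter in index_stats_cache.go prefixes with "indexStats:".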
diff --git a/pkg/querier/queryrange/log_result_cache.go b/pkg/querier/queryrange/log_result_cache.go
index ee29e385e0d28..c15568d9075ac 100644
--- a/pkg/querier/queryrange/log_result_cache.go
+++ b/pkg/querier/queryrange/log_result_cache.go
@@ -10,14 +10,13 @@ import (
 	"github.com/go-kit/log/level"
 	"github.com/gogo/protobuf/proto"
 	"github.com/grafana/dskit/httpgrpc"
+	"github.com/grafana/dskit/tenant"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
 	"golang.org/x/sync/errgroup"
 
-	"github.com/grafana/dskit/tenant"
-
 	"github.com/grafana/loki/pkg/loghttp"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/logqlmodel/stats"
@@ -201,7 +200,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
 		// if the response is empty and the query is larger than what is cached, update the cache
 		if isEmpty(result) && (lokiReq.EndTs.UnixNano()-lokiReq.StartTs.UnixNano() > cachedRequest.EndTs.UnixNano()-cachedRequest.StartTs.UnixNano()) {
-			cachedRequest = cachedRequest.WithStartEndTime(lokiReq.GetStartTs(), lokiReq.GetEndTs())
+			cachedRequest = cachedRequest.WithStartEnd(lokiReq.GetStartTs(), lokiReq.GetEndTs()).(*LokiRequest)
 			updateCache = true
 		}
 	} else {
@@ -216,7 +215,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
 	// if we're missing data at the start, start fetching from the start to the cached start.
 	if lokiReq.GetStartTs().Before(cachedRequest.GetStartTs()) {
 		g.Go(func() error {
-			startRequest = lokiReq.WithStartEndTime(lokiReq.GetStartTs(), cachedRequest.GetStartTs())
+			startRequest = lokiReq.WithStartEnd(lokiReq.GetStartTs(), cachedRequest.GetStartTs()).(*LokiRequest)
 			resp, err := l.next.Do(ctx, startRequest)
 			if err != nil {
 				return err
@@ -233,7 +232,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
 	// if we're missing data at the end, start fetching from the cached end to the end.
 	if lokiReq.GetEndTs().After(cachedRequest.GetEndTs()) {
 		g.Go(func() error {
-			endRequest = lokiReq.WithStartEndTime(cachedRequest.GetEndTs(), lokiReq.GetEndTs())
+			endRequest = lokiReq.WithStartEnd(cachedRequest.GetEndTs(), lokiReq.GetEndTs()).(*LokiRequest)
 			resp, err := l.next.Do(ctx, endRequest)
 			if err != nil {
 				return err
@@ -255,7 +254,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
 	// If it's not empty only merge the response.
 	if startResp != nil {
 		if isEmpty(startResp) {
-			cachedRequest = cachedRequest.WithStartEndTime(startRequest.GetStartTs(), cachedRequest.GetEndTs())
+			cachedRequest = cachedRequest.WithStartEnd(startRequest.GetStartTs(), cachedRequest.GetEndTs()).(*LokiRequest)
 			updateCache = true
 		} else {
 			if startResp.Status != loghttp.QueryStatusSuccess {
@@ -269,7 +268,7 @@ func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedR
 	// If it's not empty only merge the response.
 	if endResp != nil {
 		if isEmpty(endResp) {
-			cachedRequest = cachedRequest.WithStartEndTime(cachedRequest.GetStartTs(), endRequest.GetEndTs())
+			cachedRequest = cachedRequest.WithStartEnd(cachedRequest.GetStartTs(), endRequest.GetEndTs()).(*LokiRequest)
 			updateCache = true
 		} else {
 			if endResp.Status != loghttp.QueryStatusSuccess {
diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go
index 0e72b15140e38..5b227a2efa09c 100644
--- a/pkg/querier/queryrange/marshal.go
+++ b/pkg/querier/queryrange/marshal.go
@@ -119,7 +119,7 @@ func ResultToResponse(result logqlmodel.Result, params logql.Params) (queryrange
 	case sketch.TopKMatrix:
 		sk, err := data.ToProto()
 		return &TopKSketchesResponse{Response: sk}, err
-	case sketch.QuantileSketchMatrix:
+	case logql.ProbabilisticQuantileMatrix:
 		return &QuantileSketchResponse{Response: data.ToProto()}, nil
 	}
 
@@ -172,7 +172,7 @@ func ResponseToResult(resp queryrangebase.Response) (logqlmodel.Result, error) {
 			Headers: resp.GetHeaders(),
 		}, nil
 	case *QuantileSketchResponse:
-		matrix, err := sketch.QuantileSketchMatrixFromProto(r.Response)
+		matrix, err := logql.ProbabilisticQuantileMatrixFromProto(r.Response)
 		if err != nil {
 			return logqlmodel.Result{}, fmt.Errorf("cannot decode quantile sketch: %w", err)
 		}
@@ -234,6 +234,8 @@ func QueryResponseWrap(res queryrangebase.Response) (*QueryResponse, error) {
 		p.Response = &QueryResponse_Labels{response}
 	case *IndexStatsResponse:
 		p.Response = &QueryResponse_Stats{response}
+	case *VolumeResponse:
+		p.Response = &QueryResponse_Volume{response}
 	case *TopKSketchesResponse:
 		p.Response = &QueryResponse_TopkSketches{response}
 	case *QuantileSketchResponse:
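// handleHit only widens the cached interval when the newly fetched gap is empty: an
// empty response over the gap plus an empty cached response proves the union is empty,
// so the entry can cover more time without storing more data. The migration above is
// mechanical (WithStartEndTime(s, e) becomes WithStartEnd(s, e).(*LokiRequest)); a
// condensed sketch of the start-side gap fetch:
//
//	if lokiReq.GetStartTs().Before(cachedRequest.GetStartTs()) {
//		startRequest = lokiReq.WithStartEnd(lokiReq.GetStartTs(), cachedRequest.GetStartTs()).(*LokiRequest)
//		resp, err := l.next.Do(ctx, startRequest)
//		// on success: if isEmpty(resp), stretch cachedRequest back to startRequest's
//		// start; otherwise merge resp into the final answer and leave the cache as-is.
//	}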
"github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logqlmodel" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" ) @@ -32,6 +34,18 @@ func TestResultToResponse(t *testing.T) { }, }, }, + { + name: "empty probabilistic quantile matrix", + result: logqlmodel.Result{ + Data: logql.ProbabilisticQuantileMatrix([]logql.ProbabilisticQuantileVector{}), + }, + response: &QuantileSketchResponse{ + Response: &logproto.QuantileSketchMatrix{ + Values: []*logproto.QuantileSketchVector{}, + }, + Headers: []queryrangebase.PrometheusResponseHeader(nil), + }, + }, } for _, tt := range tests { @@ -43,3 +57,26 @@ func TestResultToResponse(t *testing.T) { }) } } + +func TestResponseWrap(t *testing.T) { + for _, tt := range []struct { + name string + response queryrangebase.Response + expected isQueryResponse_Response + }{ + {"volume", &VolumeResponse{}, &QueryResponse_Volume{}}, + {"series", &LokiSeriesResponse{}, &QueryResponse_Series{}}, + {"label", &LokiLabelNamesResponse{}, &QueryResponse_Labels{}}, + {"stats", &IndexStatsResponse{}, &QueryResponse_Stats{}}, + {"prom", &LokiPromResponse{}, &QueryResponse_Prom{}}, + {"streams", &LokiResponse{}, &QueryResponse_Streams{}}, + {"topk", &TopKSketchesResponse{}, &QueryResponse_TopkSketches{}}, + {"quantile", &QuantileSketchResponse{}, &QueryResponse_QuantileSketches{}}, + } { + t.Run(tt.name, func(t *testing.T) { + actual, err := QueryResponseWrap(tt.response) + require.NoError(t, err) + require.IsType(t, tt.expected, actual.Response) + }) + } +} diff --git a/pkg/querier/queryrange/prometheus.go b/pkg/querier/queryrange/prometheus.go index 81a131c1c4771..2a8ff78c164e5 100644 --- a/pkg/querier/queryrange/prometheus.go +++ b/pkg/querier/queryrange/prometheus.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" ) var ( @@ -25,7 +26,7 @@ var ( type PrometheusExtractor struct{} // Extract wraps the original prometheus cache extractor -func (PrometheusExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response { +func (PrometheusExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response { response := extractor.Extract(start, end, res.(*LokiPromResponse).Response, resStart, resEnd) return &LokiPromResponse{ Response: response.(*queryrangebase.PrometheusResponse), diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index c2cce1dc514fd..cbc541a044044 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -4,6 +4,7 @@ package queryrange import ( + bytes "bytes" fmt "fmt" rpc "github.com/gogo/googleapis/google/rpc" _ "github.com/gogo/protobuf/gogoproto" @@ -232,6 +233,49 @@ func (m *LokiInstantRequest) GetShards() []string { return nil } +type Plan struct { + Raw []byte `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (m *Plan) Reset() { *m = Plan{} } +func (*Plan) ProtoMessage() {} +func (*Plan) Descriptor() ([]byte, []int) { + return fileDescriptor_51b9d53b40d11902, []int{2} +} +func (m *Plan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Plan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err 
:= m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Plan) XXX_Merge(src proto.Message) { + xxx_messageInfo_Plan.Merge(m, src) +} +func (m *Plan) XXX_Size() int { + return m.Size() +} +func (m *Plan) XXX_DiscardUnknown() { + xxx_messageInfo_Plan.DiscardUnknown(m) +} + +var xxx_messageInfo_Plan proto.InternalMessageInfo + +func (m *Plan) GetRaw() []byte { + if m != nil { + return m.Raw + } + return nil +} + type LokiResponse struct { Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` Data LokiData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"` @@ -247,7 +291,7 @@ type LokiResponse struct { func (m *LokiResponse) Reset() { *m = LokiResponse{} } func (*LokiResponse) ProtoMessage() {} func (*LokiResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{2} + return fileDescriptor_51b9d53b40d11902, []int{3} } func (m *LokiResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -343,7 +387,7 @@ type LokiSeriesRequest struct { func (m *LokiSeriesRequest) Reset() { *m = LokiSeriesRequest{} } func (*LokiSeriesRequest) ProtoMessage() {} func (*LokiSeriesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{3} + return fileDescriptor_51b9d53b40d11902, []int{4} } func (m *LokiSeriesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -418,7 +462,7 @@ type LokiSeriesResponse struct { func (m *LokiSeriesResponse) Reset() { *m = LokiSeriesResponse{} } func (*LokiSeriesResponse) ProtoMessage() {} func (*LokiSeriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{4} + return fileDescriptor_51b9d53b40d11902, []int{5} } func (m *LokiSeriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -486,7 +530,7 @@ type LokiLabelNamesResponse struct { func (m *LokiLabelNamesResponse) Reset() { *m = LokiLabelNamesResponse{} } func (*LokiLabelNamesResponse) ProtoMessage() {} func (*LokiLabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{5} + return fileDescriptor_51b9d53b40d11902, []int{6} } func (m *LokiLabelNamesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -551,7 +595,7 @@ type LokiData struct { func (m *LokiData) Reset() { *m = LokiData{} } func (*LokiData) ProtoMessage() {} func (*LokiData) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{6} + return fileDescriptor_51b9d53b40d11902, []int{7} } func (m *LokiData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -596,7 +640,7 @@ type LokiPromResponse struct { func (m *LokiPromResponse) Reset() { *m = LokiPromResponse{} } func (*LokiPromResponse) ProtoMessage() {} func (*LokiPromResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{7} + return fileDescriptor_51b9d53b40d11902, []int{8} } func (m *LokiPromResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -647,7 +691,7 @@ type IndexStatsResponse struct { func (m *IndexStatsResponse) Reset() { *m = IndexStatsResponse{} } func (*IndexStatsResponse) ProtoMessage() {} func (*IndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{8} + return fileDescriptor_51b9d53b40d11902, []int{9} } func (m *IndexStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -684,7 +728,7 @@ type VolumeResponse struct { func (m *VolumeResponse) Reset() { *m = VolumeResponse{} } func (*VolumeResponse) 
ProtoMessage() {} func (*VolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{9} + return fileDescriptor_51b9d53b40d11902, []int{10} } func (m *VolumeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -721,7 +765,7 @@ type TopKSketchesResponse struct { func (m *TopKSketchesResponse) Reset() { *m = TopKSketchesResponse{} } func (*TopKSketchesResponse) ProtoMessage() {} func (*TopKSketchesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{10} + return fileDescriptor_51b9d53b40d11902, []int{11} } func (m *TopKSketchesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -758,7 +802,7 @@ type QuantileSketchResponse struct { func (m *QuantileSketchResponse) Reset() { *m = QuantileSketchResponse{} } func (*QuantileSketchResponse) ProtoMessage() {} func (*QuantileSketchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{11} + return fileDescriptor_51b9d53b40d11902, []int{12} } func (m *QuantileSketchResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -804,7 +848,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{12} + return fileDescriptor_51b9d53b40d11902, []int{13} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -973,7 +1017,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (*QueryRequest) ProtoMessage() {} func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{13} + return fileDescriptor_51b9d53b40d11902, []int{14} } func (m *QueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1106,6 +1150,7 @@ func (*QueryRequest) XXX_OneofWrappers() []interface{} { func init() { proto.RegisterType((*LokiRequest)(nil), "queryrange.LokiRequest") proto.RegisterType((*LokiInstantRequest)(nil), "queryrange.LokiInstantRequest") + proto.RegisterType((*Plan)(nil), "queryrange.Plan") proto.RegisterType((*LokiResponse)(nil), "queryrange.LokiResponse") proto.RegisterType((*LokiSeriesRequest)(nil), "queryrange.LokiSeriesRequest") proto.RegisterType((*LokiSeriesResponse)(nil), "queryrange.LokiSeriesResponse") @@ -1126,101 +1171,102 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 1498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, - 0x1a, 0x15, 0xf5, 0xb4, 0xc6, 0x8f, 0x9b, 0x3b, 0x36, 0x1c, 0x5e, 0x27, 0x97, 0x14, 0x04, 0xdc, - 0x44, 0xb7, 0x68, 0xa9, 0xc6, 0x4e, 0xf3, 0x6c, 0x8b, 0x86, 0x4d, 0x02, 0xa7, 0x4d, 0x8a, 0x84, - 0x36, 0xba, 0xe8, 0x6e, 0x2c, 0x8d, 0x25, 0x56, 0x7c, 0x99, 0x33, 0x32, 0xe2, 0x5d, 0x7f, 0x40, - 0x0b, 0xe4, 0x07, 0x74, 0x5d, 0x14, 0x68, 0x50, 0xa0, 0x8b, 0x6e, 0xba, 0xec, 0x2a, 0xcb, 0x2c, - 0x03, 0x01, 0x65, 0x1b, 0xa5, 0x8b, 0xc2, 0xab, 0xfc, 0x84, 0x62, 0x1e, 0xa4, 0x48, 0x49, 0x49, - 0xe4, 0xb4, 0x8b, 0x04, 0xe8, 0x46, 0x9a, 0x19, 0x7e, 0x87, 0x1c, 0x9e, 0x73, 0xbe, 0x6f, 0x66, - 0x08, 0x4e, 0x07, 0xbd, 0x4e, 0x73, 0xaf, 0x8f, 0x43, 0x1b, 0x87, 0xfc, 0xff, 0x20, 0x44, 0x5e, - 0x07, 0xa7, 0x9a, 0x46, 0x10, 0xfa, 0xd4, 0x87, 0x60, 0x34, 0xb2, 0xb6, 0xde, 0xb1, 0x69, 0xb7, - 0xbf, 0x63, 0xb4, 0x7c, 0xb7, 0xd9, 0xf1, 0x3b, 0x7e, 0xb3, 0xe3, 0xfb, 0x1d, 0x07, 0xa3, 0xc0, - 0x26, 0xb2, 0xd9, 0x0c, 0x83, 0x56, 0x93, 
0x50, 0x44, 0xfb, 0x44, 0xe0, 0xd7, 0x56, 0x58, 0x20, - 0x6f, 0x72, 0x88, 0x1c, 0xd5, 0x65, 0x38, 0xef, 0xed, 0xf4, 0x77, 0x9b, 0xd4, 0x76, 0x31, 0xa1, - 0xc8, 0x0d, 0x64, 0xc0, 0x09, 0x36, 0x3f, 0xc7, 0xef, 0x08, 0x64, 0xdc, 0x90, 0x17, 0xff, 0x93, - 0xb9, 0x48, 0x7a, 0x98, 0xb6, 0xba, 0xf2, 0x52, 0x4d, 0x5e, 0xda, 0x73, 0x5c, 0xbf, 0x8d, 0x1d, - 0x3e, 0x17, 0x22, 0x7e, 0x65, 0xc4, 0x32, 0x8b, 0x08, 0xfa, 0xa4, 0xcb, 0x7f, 0xe4, 0xe0, 0x87, - 0x2f, 0xa4, 0x63, 0x07, 0x11, 0xdc, 0x6c, 0xe3, 0x5d, 0xdb, 0xb3, 0xa9, 0xed, 0x7b, 0x24, 0xdd, - 0x96, 0x37, 0x39, 0x37, 0xdb, 0x4d, 0xc6, 0x29, 0xae, 0x7f, 0x5d, 0x00, 0xf3, 0x37, 0xfd, 0x9e, - 0x6d, 0xe1, 0xbd, 0x3e, 0x26, 0x14, 0xae, 0x80, 0x12, 0x8f, 0x51, 0x95, 0x9a, 0xd2, 0xa8, 0x5a, - 0xa2, 0xc3, 0x46, 0x1d, 0xdb, 0xb5, 0xa9, 0x9a, 0xaf, 0x29, 0x8d, 0x45, 0x4b, 0x74, 0x20, 0x04, - 0x45, 0x42, 0x71, 0xa0, 0x16, 0x6a, 0x4a, 0xa3, 0x60, 0xf1, 0x36, 0x5c, 0x03, 0x73, 0xb6, 0x47, - 0x71, 0xb8, 0x8f, 0x1c, 0xb5, 0xca, 0xc7, 0x93, 0x3e, 0x7c, 0x1f, 0x54, 0x08, 0x45, 0x21, 0xdd, - 0x26, 0x6a, 0xb1, 0xa6, 0x34, 0xe6, 0xd7, 0xd7, 0x0c, 0x21, 0x85, 0x11, 0x4b, 0x61, 0x6c, 0xc7, - 0x52, 0x98, 0x73, 0x0f, 0x22, 0x3d, 0x77, 0xef, 0x57, 0x5d, 0xb1, 0x62, 0x10, 0xbc, 0x04, 0x4a, - 0xd8, 0x6b, 0x6f, 0x13, 0xb5, 0x74, 0x04, 0xb4, 0x80, 0xc0, 0x33, 0xa0, 0xda, 0xb6, 0x43, 0xdc, - 0x62, 0x9c, 0xa9, 0xe5, 0x9a, 0xd2, 0x58, 0x5a, 0x5f, 0x36, 0x12, 0x69, 0xaf, 0xc6, 0x97, 0xac, - 0x51, 0x14, 0x7b, 0xbd, 0x00, 0xd1, 0xae, 0x5a, 0xe1, 0x4c, 0xf0, 0x36, 0xac, 0x83, 0x32, 0xe9, - 0xa2, 0xb0, 0x4d, 0xd4, 0xb9, 0x5a, 0xa1, 0x51, 0x35, 0xc1, 0x61, 0xa4, 0xcb, 0x11, 0x4b, 0xfe, - 0xc3, 0x8f, 0x40, 0x31, 0x70, 0x90, 0xa7, 0x82, 0x9a, 0xd2, 0x58, 0x30, 0xcf, 0x0d, 0x22, 0x3d, - 0xe3, 0xdd, 0x10, 0xed, 0x22, 0x0f, 0x35, 0x1d, 0xbf, 0x67, 0x37, 0xd3, 0xa2, 0x31, 0x8c, 0x71, - 0x87, 0xd1, 0x7d, 0xdb, 0x41, 0x9e, 0xc5, 0xef, 0x51, 0xff, 0x31, 0x0f, 0x20, 0x93, 0xe7, 0x86, - 0x47, 0x28, 0xf2, 0xe8, 0xcb, 0xa8, 0xf4, 0x2e, 0x28, 0x33, 0x83, 0x6f, 0x13, 0xae, 0xd3, 0xac, - 0xb4, 0x49, 0x4c, 0x96, 0xb7, 0xe2, 0x91, 0x78, 0x2b, 0x4d, 0xe5, 0xad, 0xfc, 0x42, 0xde, 0x2a, - 0x7f, 0x03, 0x6f, 0xdf, 0x15, 0xc1, 0x82, 0xb0, 0x35, 0x09, 0x7c, 0x8f, 0x60, 0x36, 0x81, 0x2d, - 0x5e, 0x1a, 0x04, 0x65, 0x72, 0x02, 0x7c, 0xc4, 0x92, 0x57, 0xe0, 0x07, 0xa0, 0x78, 0x15, 0x51, - 0xc4, 0xe9, 0x9b, 0x5f, 0x5f, 0x31, 0x52, 0xc9, 0xc2, 0xee, 0xc5, 0xae, 0x99, 0xab, 0x8c, 0xa1, - 0xc3, 0x48, 0x5f, 0x6a, 0x23, 0x8a, 0xde, 0xf4, 0x5d, 0x9b, 0x62, 0x37, 0xa0, 0x07, 0x16, 0x47, - 0xc2, 0x77, 0x40, 0xf5, 0x5a, 0x18, 0xfa, 0xe1, 0xf6, 0x41, 0x80, 0x39, 0xdd, 0x55, 0xf3, 0xf8, - 0x61, 0xa4, 0x2f, 0xe3, 0x78, 0x30, 0x85, 0x18, 0x45, 0xc2, 0xff, 0x83, 0x12, 0xef, 0x70, 0x82, - 0xab, 0xe6, 0xf2, 0x61, 0xa4, 0xff, 0x8b, 0x43, 0x52, 0xe1, 0x22, 0x22, 0xab, 0x47, 0x69, 0x26, - 0x3d, 0x12, 0x5b, 0x94, 0xd3, 0xb6, 0x50, 0x41, 0x65, 0x1f, 0x87, 0x84, 0xdd, 0xa6, 0xc2, 0xc7, - 0xe3, 0x2e, 0xbc, 0x02, 0x00, 0x23, 0xc6, 0x26, 0xd4, 0x6e, 0x31, 0x9f, 0x33, 0x32, 0x16, 0x0d, - 0x51, 0xc6, 0x2c, 0x4c, 0xfa, 0x0e, 0x35, 0xa1, 0x64, 0x21, 0x15, 0x68, 0xa5, 0xda, 0xf0, 0xbe, - 0x02, 0x2a, 0x9b, 0x18, 0xb5, 0x71, 0x48, 0xd4, 0x6a, 0xad, 0xd0, 0x98, 0x5f, 0xff, 0x9f, 0x91, - 0xae, 0x59, 0xb7, 0x43, 0xdf, 0xc5, 0xb4, 0x8b, 0xfb, 0x24, 0x16, 0x48, 0x44, 0x9b, 0xbd, 0x41, - 0xa4, 0xef, 0xcc, 0xa2, 0xfa, 0x4c, 0x75, 0xf2, 0x99, 0xcf, 0x39, 0x8c, 0x74, 0xe5, 0x2d, 0x2b, - 0x9e, 0x62, 0xfd, 0x17, 0x05, 0xfc, 0x9b, 0x29, 0xbc, 0xc5, 0xee, 0x4d, 0x52, 0x49, 0xe6, 0x22, - 0xda, 0xea, 0xaa, 0x0a, 0xb3, 0xac, 0x25, 0x3a, 0xe9, 0x22, 0x96, 
0xff, 0x4b, 0x45, 0xac, 0x70, - 0xf4, 0x22, 0x16, 0x67, 0x56, 0x71, 0x6a, 0x66, 0x95, 0x9e, 0x95, 0x59, 0xf5, 0x2f, 0x0b, 0xa2, - 0x8a, 0xc4, 0xef, 0x77, 0x84, 0x9c, 0xb8, 0x9e, 0xe4, 0x44, 0x81, 0xcf, 0x36, 0xb1, 0x9a, 0xb8, - 0xd7, 0x8d, 0x36, 0xf6, 0xa8, 0xbd, 0x6b, 0xe3, 0xf0, 0x05, 0x99, 0x91, 0xb2, 0x5b, 0x21, 0x6b, - 0xb7, 0xb4, 0x57, 0x8a, 0xaf, 0xbc, 0x57, 0xc6, 0xb2, 0xa3, 0xf4, 0x12, 0xd9, 0x51, 0x7f, 0x9a, - 0x07, 0xab, 0x4c, 0x8e, 0x9b, 0x68, 0x07, 0x3b, 0x9f, 0x20, 0xf7, 0x88, 0x92, 0x9c, 0x4a, 0x49, - 0x52, 0x35, 0xe1, 0x3f, 0x94, 0xcf, 0x40, 0xf9, 0x37, 0x0a, 0x98, 0x8b, 0x6b, 0x38, 0x34, 0x00, - 0x10, 0x30, 0x5e, 0xa6, 0x05, 0xd1, 0x4b, 0x0c, 0x1c, 0x26, 0xa3, 0x56, 0x2a, 0x02, 0x7e, 0x0e, - 0xca, 0xa2, 0x27, 0xb3, 0xe0, 0x78, 0x2a, 0x0b, 0x68, 0x88, 0x91, 0x7b, 0xa5, 0x8d, 0x02, 0x8a, - 0x43, 0xf3, 0x22, 0x9b, 0xc5, 0x20, 0xd2, 0x4f, 0x3f, 0x8f, 0x22, 0xbe, 0xf3, 0x13, 0x38, 0x26, - 0xae, 0x78, 0xa6, 0x25, 0x9f, 0x50, 0xff, 0x4a, 0x01, 0xc7, 0xd8, 0x44, 0x19, 0x35, 0x89, 0x2b, - 0xae, 0x82, 0xb9, 0x50, 0xb6, 0xf9, 0x74, 0xe7, 0xd7, 0xeb, 0x46, 0x96, 0xd6, 0x29, 0x54, 0x9a, - 0xc5, 0x07, 0x91, 0xae, 0x58, 0x09, 0x12, 0x6e, 0x64, 0x68, 0xcc, 0x4f, 0xa3, 0x91, 0x41, 0x72, - 0x19, 0xe2, 0x7e, 0xca, 0x03, 0x78, 0xc3, 0x6b, 0xe3, 0xbb, 0xcc, 0x7c, 0x23, 0x9f, 0xf6, 0x27, - 0x66, 0x74, 0x72, 0x44, 0xca, 0x64, 0xbc, 0x79, 0x79, 0x10, 0xe9, 0xe7, 0x9f, 0xc7, 0xca, 0x73, - 0xc0, 0xa9, 0x57, 0x48, 0x1b, 0x37, 0xff, 0xea, 0xaf, 0x2b, 0xdf, 0xe7, 0xc1, 0xd2, 0xa7, 0xbe, - 0xd3, 0x77, 0x71, 0x42, 0x9c, 0x3b, 0x41, 0x9c, 0x3a, 0x22, 0x2e, 0x1b, 0x6b, 0x9e, 0x1f, 0x44, - 0xfa, 0xc6, 0x4c, 0xa4, 0x65, 0x81, 0xaf, 0x2f, 0x61, 0xf7, 0xf3, 0x60, 0x65, 0xdb, 0x0f, 0x3e, - 0xde, 0xe2, 0xc7, 0xaa, 0x54, 0x5d, 0xc4, 0x13, 0xb4, 0xad, 0x8c, 0x68, 0x63, 0x88, 0x5b, 0x88, - 0x86, 0xf6, 0x5d, 0x73, 0x63, 0x10, 0xe9, 0xcd, 0x99, 0x28, 0x1b, 0x81, 0x5e, 0x5f, 0xba, 0x7e, - 0xce, 0x83, 0xd5, 0x3b, 0x7d, 0xe4, 0x51, 0xdb, 0xc1, 0x82, 0xb2, 0x84, 0xb0, 0x83, 0x09, 0xc2, - 0xb4, 0x11, 0x61, 0x59, 0x8c, 0xa4, 0xee, 0xbd, 0x41, 0xa4, 0x5f, 0x9c, 0x89, 0xba, 0x69, 0xf0, - 0xd7, 0x97, 0xc4, 0x1f, 0x8a, 0x60, 0x91, 0x1f, 0x1f, 0x12, 0xee, 0xde, 0x00, 0x72, 0xc9, 0x95, - 0xcc, 0xc1, 0x78, 0x8f, 0x16, 0x06, 0x2d, 0x63, 0x4b, 0x2e, 0xc6, 0x22, 0x02, 0x5e, 0x00, 0x65, - 0xc2, 0x77, 0x42, 0xb2, 0xa0, 0x6a, 0xe3, 0xa7, 0x86, 0xec, 0x9e, 0x6b, 0x33, 0x67, 0xc9, 0x78, - 0x76, 0x2e, 0x73, 0xd8, 0x06, 0x20, 0xde, 0x09, 0xd6, 0xc7, 0x91, 0x93, 0xdb, 0x03, 0x86, 0x16, - 0x18, 0x78, 0x0e, 0x94, 0x78, 0xe5, 0x96, 0x27, 0xe9, 0xcc, 0x63, 0x27, 0x4b, 0xe8, 0x66, 0xce, - 0x12, 0xe1, 0x70, 0x1d, 0x14, 0x83, 0xd0, 0x77, 0xe5, 0x2a, 0x7a, 0x72, 0xfc, 0x99, 0xe9, 0x65, - 0x67, 0x33, 0x67, 0xf1, 0x58, 0x78, 0x96, 0x6d, 0x79, 0xd9, 0x7a, 0x45, 0xf8, 0x11, 0x82, 0x95, - 0xac, 0x31, 0x58, 0x0a, 0x12, 0x87, 0xc2, 0xb3, 0xa0, 0xbc, 0xcf, 0xcb, 0x12, 0x3f, 0x5f, 0xb0, - 0xbd, 0x63, 0x0a, 0x94, 0x2d, 0x58, 0xec, 0xbd, 0x44, 0x2c, 0xbc, 0x0e, 0x16, 0xa8, 0x1f, 0xf4, - 0xe2, 0x02, 0x20, 0x8f, 0x1f, 0xb5, 0x34, 0x76, 0x5a, 0x81, 0xd8, 0xcc, 0x59, 0x19, 0x1c, 0xbc, - 0x0d, 0x8e, 0xed, 0x65, 0x6c, 0x8a, 0x09, 0xff, 0x1e, 0x31, 0xc6, 0xf3, 0xf4, 0xec, 0xd9, 0xcc, - 0x59, 0x13, 0x68, 0x13, 0x8c, 0x32, 0xaa, 0xfe, 0x7b, 0x01, 0x2c, 0x48, 0xcf, 0x88, 0xb3, 0xc2, - 0xf9, 0xc4, 0x06, 0xc2, 0x32, 0xff, 0x7d, 0x96, 0x0d, 0x78, 0x78, 0xca, 0x05, 0x6f, 0x27, 0x2e, - 0x10, 0xfe, 0x59, 0x1d, 0x65, 0x29, 0xd7, 0x3f, 0x85, 0x90, 0xca, 0x6f, 0xc4, 0xca, 0x0b, 0xdb, - 0x9c, 0x98, 0xbe, 0xee, 0xc6, 0x28, 0x29, 0xfb, 0x25, 0x50, 0xb1, 0xc5, 0x27, 0x84, 0x69, 
0x86, - 0x99, 0xfc, 0xc2, 0xc0, 0x84, 0x94, 0x00, 0xb8, 0x31, 0x92, 0x5f, 0xb8, 0xe6, 0xf8, 0xa4, 0xfc, - 0x09, 0x28, 0x56, 0xff, 0x4c, 0xa2, 0x7e, 0x59, 0x62, 0x26, 0x16, 0xab, 0xe4, 0xc5, 0xa4, 0xf4, - 0x9b, 0x60, 0xce, 0xc5, 0x14, 0xb1, 0xbd, 0xac, 0x5a, 0xe1, 0x75, 0xe3, 0x54, 0x56, 0xaa, 0x11, - 0xdf, 0xc6, 0x2d, 0x19, 0x78, 0xcd, 0xa3, 0xe1, 0x81, 0xdc, 0xb6, 0x24, 0xe8, 0xb5, 0xcb, 0x60, - 0x31, 0x13, 0x00, 0x8f, 0x81, 0x42, 0x0f, 0xc7, 0x5f, 0x4b, 0x58, 0x93, 0x1d, 0xee, 0xf6, 0x91, - 0xd3, 0xc7, 0x9c, 0xf6, 0xaa, 0x25, 0x3a, 0x97, 0xf2, 0x17, 0x14, 0xb3, 0x0a, 0x2a, 0xa1, 0x78, - 0x8a, 0xd9, 0x7e, 0xf8, 0x58, 0xcb, 0x3d, 0x7a, 0xac, 0xe5, 0x9e, 0x3e, 0xd6, 0x94, 0x2f, 0x86, - 0x9a, 0xf2, 0xed, 0x50, 0x53, 0x1e, 0x0c, 0x35, 0xe5, 0xe1, 0x50, 0x53, 0x7e, 0x1b, 0x6a, 0xca, - 0x1f, 0x43, 0x2d, 0xf7, 0x74, 0xa8, 0x29, 0xf7, 0x9e, 0x68, 0xb9, 0x87, 0x4f, 0xb4, 0xdc, 0xa3, - 0x27, 0x5a, 0xee, 0x33, 0xe3, 0x68, 0x25, 0x6c, 0xa7, 0xcc, 0x69, 0xd9, 0xf8, 0x33, 0x00, 0x00, - 0xff, 0xff, 0x33, 0xb4, 0xee, 0x07, 0x17, 0x15, 0x00, 0x00, + // 1514 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x5b, 0x6f, 0x1b, 0x45, + 0x1b, 0xf6, 0xfa, 0x18, 0x4f, 0x0e, 0x5f, 0xbe, 0x49, 0x94, 0xee, 0x97, 0xf6, 0xdb, 0xb5, 0x2c, + 0xd1, 0x06, 0x04, 0x6b, 0x9a, 0x94, 0x1e, 0x01, 0xd1, 0xa5, 0xad, 0x52, 0xd1, 0xa2, 0x76, 0x13, + 0x71, 0x81, 0xb8, 0x99, 0xd8, 0x13, 0x7b, 0xf1, 0x9e, 0xb2, 0x33, 0x0e, 0xcd, 0x1d, 0x3f, 0x00, + 0xa4, 0xfe, 0x0a, 0x84, 0x44, 0x55, 0x89, 0x5b, 0x2e, 0xb9, 0xa1, 0x97, 0xbd, 0xac, 0x2c, 0xb1, + 0x50, 0x97, 0x0b, 0x94, 0xab, 0xfe, 0x04, 0x34, 0x87, 0x5d, 0xef, 0xda, 0x6e, 0xeb, 0x14, 0x21, + 0xb5, 0x12, 0x37, 0xf6, 0x1c, 0xde, 0x67, 0xf6, 0xdd, 0xe7, 0x79, 0xdf, 0x77, 0x66, 0x16, 0x9c, + 0x0a, 0xba, 0xed, 0xc6, 0x5e, 0x0f, 0x87, 0x36, 0x0e, 0xf9, 0xff, 0x41, 0x88, 0xbc, 0x36, 0x4e, + 0x35, 0x8d, 0x20, 0xf4, 0xa9, 0x0f, 0xc1, 0x70, 0x64, 0x75, 0xbd, 0x6d, 0xd3, 0x4e, 0x6f, 0xc7, + 0x68, 0xfa, 0x6e, 0xa3, 0xed, 0xb7, 0xfd, 0x46, 0xdb, 0xf7, 0xdb, 0x0e, 0x46, 0x81, 0x4d, 0x64, + 0xb3, 0x11, 0x06, 0xcd, 0x06, 0xa1, 0x88, 0xf6, 0x88, 0xc0, 0xaf, 0x2e, 0x33, 0x43, 0xde, 0xe4, + 0x10, 0x39, 0xaa, 0x4b, 0x73, 0xde, 0xdb, 0xe9, 0xed, 0x36, 0xa8, 0xed, 0x62, 0x42, 0x91, 0x1b, + 0x48, 0x83, 0xe3, 0xcc, 0x3f, 0xc7, 0x6f, 0x0b, 0x64, 0xdc, 0x90, 0x93, 0xff, 0xcb, 0x4c, 0x92, + 0x2e, 0xa6, 0xcd, 0x8e, 0x9c, 0xaa, 0xc9, 0xa9, 0x3d, 0xc7, 0xf5, 0x5b, 0xd8, 0xe1, 0xbe, 0x10, + 0xf1, 0x2b, 0x2d, 0x96, 0x98, 0x45, 0xd0, 0x23, 0x1d, 0xfe, 0x23, 0x07, 0x3f, 0x7e, 0x21, 0x1d, + 0x3b, 0x88, 0xe0, 0x46, 0x0b, 0xef, 0xda, 0x9e, 0x4d, 0x6d, 0xdf, 0x23, 0xe9, 0xb6, 0x5c, 0xe4, + 0xec, 0x74, 0x8b, 0x8c, 0x52, 0x5c, 0xbf, 0x5f, 0x00, 0xb3, 0x37, 0xfc, 0xae, 0x6d, 0xe1, 0xbd, + 0x1e, 0x26, 0x14, 0x2e, 0x83, 0x12, 0xb7, 0x51, 0x95, 0x9a, 0xb2, 0x56, 0xb5, 0x44, 0x87, 0x8d, + 0x3a, 0xb6, 0x6b, 0x53, 0x35, 0x5f, 0x53, 0xd6, 0xe6, 0x2d, 0xd1, 0x81, 0x10, 0x14, 0x09, 0xc5, + 0x81, 0x5a, 0xa8, 0x29, 0x6b, 0x05, 0x8b, 0xb7, 0xe1, 0x2a, 0x98, 0xb1, 0x3d, 0x8a, 0xc3, 0x7d, + 0xe4, 0xa8, 0x55, 0x3e, 0x9e, 0xf4, 0xe1, 0x87, 0xa0, 0x42, 0x28, 0x0a, 0xe9, 0x36, 0x51, 0x8b, + 0x35, 0x65, 0x6d, 0x76, 0x7d, 0xd5, 0x10, 0x52, 0x18, 0xb1, 0x14, 0xc6, 0x76, 0x2c, 0x85, 0x39, + 0xf3, 0x20, 0xd2, 0x73, 0x77, 0x7f, 0xd3, 0x15, 0x2b, 0x06, 0xc1, 0x8b, 0xa0, 0x84, 0xbd, 0xd6, + 0x36, 0x51, 0x4b, 0x47, 0x40, 0x0b, 0x08, 0x3c, 0x0d, 0xaa, 0x2d, 0x3b, 0xc4, 0x4d, 0xc6, 0x99, + 0x5a, 0xae, 0x29, 0x6b, 0x0b, 0xeb, 0x4b, 0x46, 0x22, 0xed, 0x95, 0x78, 0xca, 0x1a, 0x5a, 0xb1, + 0xd7, 
0x0b, 0x10, 0xed, 0xa8, 0x15, 0xce, 0x04, 0x6f, 0xc3, 0x3a, 0x28, 0x93, 0x0e, 0x0a, 0x5b, + 0x44, 0x9d, 0xa9, 0x15, 0xd6, 0xaa, 0x26, 0x38, 0x8c, 0x74, 0x39, 0x62, 0xc9, 0x7f, 0xf8, 0x05, + 0x28, 0x06, 0x0e, 0xf2, 0x54, 0xc0, 0xbd, 0x5c, 0x34, 0x52, 0x9c, 0xdf, 0x72, 0x90, 0x67, 0x9e, + 0xed, 0x47, 0x7a, 0x26, 0x9a, 0x43, 0xb4, 0x8b, 0x3c, 0xd4, 0x70, 0xfc, 0xae, 0xdd, 0x48, 0xcb, + 0xc8, 0x56, 0x31, 0x6e, 0x33, 0x34, 0xc3, 0x59, 0x7c, 0xd5, 0xfa, 0x2f, 0x79, 0x00, 0x99, 0x60, + 0xd7, 0x3d, 0x42, 0x91, 0x47, 0x5f, 0x46, 0xb7, 0xf7, 0x41, 0x99, 0x85, 0xfc, 0x36, 0xe1, 0xca, + 0x4d, 0x4b, 0xa4, 0xc4, 0x64, 0x99, 0x2c, 0x1e, 0x89, 0xc9, 0xd2, 0x44, 0x26, 0xcb, 0x2f, 0x64, + 0xb2, 0xf2, 0x8f, 0x30, 0xa9, 0x82, 0x22, 0xeb, 0xc1, 0x45, 0x50, 0x08, 0xd1, 0x57, 0x9c, 0xb8, + 0x39, 0x8b, 0x35, 0xeb, 0x3f, 0x14, 0xc1, 0x9c, 0x48, 0x0a, 0x12, 0xf8, 0x1e, 0xc1, 0xcc, 0xd9, + 0x2d, 0x5e, 0x58, 0x04, 0xbd, 0xd2, 0x59, 0x3e, 0x62, 0xc9, 0x19, 0xf8, 0x11, 0x28, 0x5e, 0x41, + 0x14, 0x71, 0xaa, 0x67, 0xd7, 0x97, 0xd3, 0xce, 0xb2, 0xb5, 0xd8, 0x9c, 0xb9, 0xc2, 0xd8, 0x3c, + 0x8c, 0xf4, 0x85, 0x16, 0xa2, 0xe8, 0x6d, 0xdf, 0xb5, 0x29, 0x76, 0x03, 0x7a, 0x60, 0x71, 0x24, + 0x7c, 0x0f, 0x54, 0xaf, 0x86, 0xa1, 0x1f, 0x6e, 0x1f, 0x04, 0x98, 0x4b, 0x53, 0x35, 0x8f, 0x1d, + 0x46, 0xfa, 0x12, 0x8e, 0x07, 0x53, 0x88, 0xa1, 0x25, 0x7c, 0x13, 0x94, 0x78, 0x87, 0x8b, 0x51, + 0x35, 0x97, 0x0e, 0x23, 0xfd, 0x3f, 0x1c, 0x92, 0x32, 0x17, 0x16, 0x59, 0xed, 0x4a, 0x53, 0x69, + 0x97, 0x84, 0x50, 0x39, 0x1d, 0x42, 0x2a, 0xa8, 0xec, 0xe3, 0x90, 0xb0, 0x65, 0x2a, 0x7c, 0x3c, + 0xee, 0xc2, 0xcb, 0x00, 0x30, 0x62, 0x6c, 0x42, 0xed, 0x26, 0xcb, 0x12, 0x46, 0xc6, 0xbc, 0x21, + 0x8a, 0xa0, 0x85, 0x49, 0xcf, 0xa1, 0x26, 0x94, 0x2c, 0xa4, 0x0c, 0xad, 0x54, 0x1b, 0xde, 0x53, + 0x40, 0x65, 0x13, 0xa3, 0x16, 0x0e, 0x89, 0x5a, 0xad, 0x15, 0xd6, 0x66, 0xd7, 0xdf, 0x30, 0xd2, + 0x15, 0xef, 0x56, 0xe8, 0xbb, 0x98, 0x76, 0x70, 0x8f, 0xc4, 0x02, 0x09, 0x6b, 0xb3, 0xdb, 0x8f, + 0xf4, 0x9d, 0x69, 0xe2, 0x61, 0xaa, 0x2a, 0xfb, 0xcc, 0xe7, 0x1c, 0x46, 0xba, 0xf2, 0x8e, 0x15, + 0xbb, 0x58, 0xff, 0x55, 0x01, 0xff, 0x65, 0x0a, 0x6f, 0xb1, 0xb5, 0x49, 0x2a, 0x21, 0x5d, 0x44, + 0x9b, 0x1d, 0x55, 0x61, 0xe1, 0x6d, 0x89, 0x4e, 0xba, 0x04, 0xe6, 0xff, 0x56, 0x09, 0x2c, 0x1c, + 0xbd, 0x04, 0xc6, 0x59, 0x58, 0x9c, 0x98, 0x85, 0xa5, 0x67, 0x65, 0x61, 0xfd, 0x9b, 0x82, 0xa8, + 0x38, 0xf1, 0xfb, 0x1d, 0x21, 0x27, 0xae, 0x25, 0x39, 0x51, 0xe0, 0xde, 0x26, 0xa1, 0x26, 0xd6, + 0xba, 0xde, 0xc2, 0x1e, 0xb5, 0x77, 0x6d, 0x1c, 0xbe, 0x20, 0x33, 0x52, 0xe1, 0x56, 0xc8, 0x86, + 0x5b, 0x3a, 0x56, 0x8a, 0xaf, 0x7c, 0xac, 0x8c, 0x64, 0x47, 0xe9, 0x25, 0xb2, 0xa3, 0xfe, 0x34, + 0x0f, 0x56, 0x98, 0x1c, 0x37, 0xd0, 0x0e, 0x76, 0x3e, 0x45, 0xee, 0x11, 0x25, 0x39, 0x99, 0x92, + 0xa4, 0x6a, 0xc2, 0x7f, 0x29, 0x9f, 0x82, 0xf2, 0xef, 0x14, 0x30, 0x13, 0xd7, 0x70, 0x68, 0x00, + 0x20, 0x60, 0xbc, 0x4c, 0x0b, 0xa2, 0x17, 0x18, 0x38, 0x4c, 0x46, 0xad, 0x94, 0x05, 0xfc, 0x12, + 0x94, 0x45, 0x4f, 0x66, 0xc1, 0xb1, 0x54, 0x16, 0xd0, 0x10, 0x23, 0xf7, 0x72, 0x0b, 0x05, 0x14, + 0x87, 0xe6, 0x05, 0xe6, 0x45, 0x3f, 0xd2, 0x4f, 0x3d, 0x8f, 0x22, 0x7e, 0x6e, 0x14, 0x38, 0x26, + 0xae, 0x78, 0xa6, 0x25, 0x9f, 0x50, 0xff, 0x56, 0x01, 0x8b, 0xcc, 0x51, 0x46, 0x4d, 0x12, 0x15, + 0x57, 0xc0, 0x4c, 0x28, 0xdb, 0xdc, 0xdd, 0xd9, 0xf5, 0xba, 0x91, 0xa5, 0x75, 0x02, 0x95, 0x66, + 0xf1, 0x41, 0xa4, 0x2b, 0x56, 0x82, 0x84, 0x1b, 0x19, 0x1a, 0xf3, 0x93, 0x68, 0x64, 0x90, 0x5c, + 0x86, 0xb8, 0x9f, 0xf2, 0x00, 0x5e, 0xf7, 0x5a, 0xf8, 0x0e, 0x0b, 0xbe, 0x61, 0x9c, 0xf6, 0xc6, + 0x3c, 0x3a, 0x31, 0x24, 0x65, 
0xdc, 0xde, 0xbc, 0xd4, 0x8f, 0xf4, 0x73, 0xcf, 0x63, 0xe5, 0x39, + 0xe0, 0xd4, 0x2b, 0xa4, 0x03, 0x37, 0xff, 0xea, 0xef, 0x2b, 0xf7, 0xf3, 0x60, 0xe1, 0x33, 0xdf, + 0xe9, 0xb9, 0x38, 0x21, 0xce, 0x1d, 0x23, 0x4e, 0x1d, 0x12, 0x97, 0xb5, 0x35, 0xcf, 0xf5, 0x23, + 0x7d, 0x63, 0x2a, 0xd2, 0xb2, 0xc0, 0xd7, 0x97, 0xb0, 0x7b, 0x79, 0xb0, 0xbc, 0xed, 0x07, 0x9f, + 0x6c, 0xf1, 0x4b, 0x59, 0xaa, 0x2e, 0xe2, 0x31, 0xda, 0x96, 0x87, 0xb4, 0x31, 0xc4, 0x4d, 0x44, + 0x43, 0xfb, 0x8e, 0xb9, 0xd1, 0x8f, 0xf4, 0xc6, 0x54, 0x94, 0x0d, 0x41, 0xaf, 0x2f, 0x5d, 0x3f, + 0xe7, 0xc1, 0xca, 0xed, 0x1e, 0xf2, 0xa8, 0xed, 0x60, 0x41, 0x59, 0x42, 0xd8, 0xc1, 0x18, 0x61, + 0xda, 0x90, 0xb0, 0x2c, 0x46, 0x52, 0xf7, 0x41, 0x3f, 0xd2, 0x2f, 0x4c, 0x45, 0xdd, 0x24, 0xf8, + 0xeb, 0x4b, 0xe2, 0x8f, 0x45, 0x30, 0xcf, 0x2f, 0x16, 0x09, 0x77, 0x6f, 0x01, 0xb9, 0xe5, 0x4a, + 0xe6, 0x60, 0x7c, 0x46, 0x0b, 0x83, 0xa6, 0xb1, 0x25, 0x37, 0x63, 0x61, 0x01, 0xcf, 0x83, 0x32, + 0xe1, 0x27, 0x21, 0x59, 0x50, 0xb5, 0xd1, 0x5b, 0x43, 0xf6, 0xcc, 0xb5, 0x99, 0xb3, 0xa4, 0x3d, + 0xbb, 0xc3, 0x39, 0xec, 0x00, 0x10, 0x9f, 0x04, 0xeb, 0xa3, 0xc8, 0xf1, 0xe3, 0x01, 0x43, 0x0b, + 0x0c, 0x3c, 0x0b, 0x4a, 0xbc, 0x72, 0xcb, 0x7b, 0x78, 0xe6, 0xb1, 0xe3, 0x25, 0x74, 0x33, 0x67, + 0x09, 0x73, 0xb8, 0x0e, 0x8a, 0x41, 0xe8, 0xbb, 0x72, 0x17, 0x3d, 0x31, 0xfa, 0xcc, 0xf4, 0xb6, + 0xb3, 0x99, 0xb3, 0xb8, 0x2d, 0x3c, 0xc3, 0x8e, 0xbc, 0x6c, 0xbf, 0x22, 0xfc, 0x0a, 0xc1, 0x4a, + 0xd6, 0x08, 0x2c, 0x05, 0x89, 0x4d, 0xe1, 0x19, 0x50, 0xde, 0xe7, 0x65, 0x49, 0x5e, 0xfe, 0x56, + 0xd3, 0xa0, 0x6c, 0xc1, 0x62, 0xef, 0x25, 0x6c, 0xe1, 0x35, 0x30, 0x47, 0xfd, 0xa0, 0x1b, 0x17, + 0x00, 0x79, 0xfd, 0xa8, 0xa5, 0xb1, 0x93, 0x0a, 0xc4, 0x66, 0xce, 0xca, 0xe0, 0xe0, 0x2d, 0xb0, + 0xb8, 0x97, 0x09, 0x53, 0x4c, 0xf8, 0xd7, 0x8c, 0x11, 0x9e, 0x27, 0x67, 0xcf, 0x66, 0xce, 0x1a, + 0x43, 0x9b, 0x60, 0x98, 0x51, 0xf5, 0x3f, 0x0a, 0x60, 0x4e, 0xc6, 0x8c, 0xb8, 0x2b, 0x9c, 0x4b, + 0xc2, 0x40, 0x84, 0xcc, 0xff, 0x9f, 0x15, 0x06, 0xdc, 0x3c, 0x15, 0x05, 0xef, 0x26, 0x51, 0x20, + 0xe2, 0x67, 0x65, 0x98, 0xa5, 0x5c, 0xff, 0x14, 0x42, 0x2a, 0xbf, 0x11, 0x2b, 0x2f, 0xc2, 0xe6, + 0xf8, 0xe4, 0x7d, 0x37, 0x46, 0x49, 0xd9, 0x2f, 0x82, 0x8a, 0x2d, 0x3e, 0x37, 0x4c, 0x0a, 0x98, + 0xf1, 0xaf, 0x11, 0x4c, 0x48, 0x09, 0x80, 0x1b, 0x43, 0xf9, 0x45, 0xd4, 0x1c, 0x1b, 0x97, 0x3f, + 0x01, 0xc5, 0xea, 0x9f, 0x4e, 0xd4, 0x2f, 0x4b, 0xcc, 0xd8, 0x66, 0x95, 0xbc, 0x98, 0x94, 0x7e, + 0x13, 0xcc, 0xb8, 0x98, 0x22, 0x76, 0x96, 0x55, 0x2b, 0xbc, 0x6e, 0x9c, 0xcc, 0x4a, 0x35, 0xe4, + 0xdb, 0xb8, 0x29, 0x0d, 0xaf, 0x7a, 0x34, 0x3c, 0x90, 0xc7, 0x96, 0x04, 0xbd, 0x7a, 0x09, 0xcc, + 0x67, 0x0c, 0xe0, 0x22, 0x28, 0x74, 0x71, 0xfc, 0x65, 0x85, 0x35, 0xd9, 0xe5, 0x6e, 0x1f, 0x39, + 0x3d, 0xcc, 0x69, 0xaf, 0x5a, 0xa2, 0x73, 0x31, 0x7f, 0x5e, 0x31, 0xab, 0xa0, 0x12, 0x8a, 0xa7, + 0x98, 0xad, 0x87, 0x8f, 0xb5, 0xdc, 0xa3, 0xc7, 0x5a, 0xee, 0xe9, 0x63, 0x4d, 0xf9, 0x7a, 0xa0, + 0x29, 0xdf, 0x0f, 0x34, 0xe5, 0xc1, 0x40, 0x53, 0x1e, 0x0e, 0x34, 0xe5, 0xf7, 0x81, 0xa6, 0xfc, + 0x39, 0xd0, 0x72, 0x4f, 0x07, 0x9a, 0x72, 0xf7, 0x89, 0x96, 0x7b, 0xf8, 0x44, 0xcb, 0x3d, 0x7a, + 0xa2, 0xe5, 0x3e, 0x37, 0x8e, 0x56, 0xc2, 0x76, 0xca, 0x9c, 0x96, 0x8d, 0xbf, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xe6, 0x4a, 0x9a, 0x06, 0x55, 0x15, 0x00, 0x00, } func (this *LokiRequest) Equal(that interface{}) bool { @@ -1334,6 +1380,30 @@ func (this *LokiInstantRequest) Equal(that interface{}) bool { } return true } +func (this *Plan) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Plan) + if 
!ok { + that2, ok := that.(Plan) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Raw, that1.Raw) { + return false + } + return true +} func (this *LokiResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2170,6 +2240,16 @@ func (this *LokiInstantRequest) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *Plan) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&queryrange.Plan{") + s = append(s, "Raw: "+fmt.Sprintf("%#v", this.Raw)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *LokiResponse) GoString() string { if this == nil { return "nil" @@ -2522,21 +2602,21 @@ func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x30 } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintQueryrange(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x2a - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) if err2 != nil { return 0, err2 } i -= n2 i = encodeVarintQueryrange(dAtA, i, uint64(n2)) i-- + dAtA[i] = 0x2a + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintQueryrange(dAtA, i, uint64(n3)) + i-- dAtA[i] = 0x22 if m.Step != 0 { i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) @@ -2611,12 +2691,12 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x20 } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.TimeTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.TimeTs):]) - if err3 != nil { - return 0, err3 + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.TimeTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.TimeTs):]) + if err5 != nil { + return 0, err5 } - i -= n3 - i = encodeVarintQueryrange(dAtA, i, uint64(n3)) + i -= n5 + i = encodeVarintQueryrange(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x1a if m.Limit != 0 { @@ -2634,6 +2714,36 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Plan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Plan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Plan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Raw) > 0 { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *LokiResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2763,20 +2873,20 @@ func (m *LokiSeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - n6, err6 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) - if err6 != nil { - return 0, err6 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) + if err8 != nil { + return 0, err8 } - i -= n6 - i = encodeVarintQueryrange(dAtA, i, uint64(n6)) + i -= n8 + i = encodeVarintQueryrange(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x1a - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) - if err7 != nil { - return 0, err7 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + if err9 != nil { + return 0, err9 } - i -= n7 - i = encodeVarintQueryrange(dAtA, i, uint64(n7)) + i -= n9 + i = encodeVarintQueryrange(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x12 if len(m.Match) > 0 { @@ -3681,6 +3791,19 @@ func (m *LokiInstantRequest) Size() (n int) { return n } +func (m *Plan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Raw) + if l > 0 { + n += 1 + l + sovQueryrange(uint64(l)) + } + return n +} + func (m *LokiResponse) Size() (n int) { if m == nil { return 0 @@ -4166,6 +4289,16 @@ func (this *LokiInstantRequest) String() string { }, "") return s } +func (this *Plan) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Plan{`, + `Raw:` + fmt.Sprintf("%v", this.Raw) + `,`, + `}`, + }, "") + return s +} func (this *LokiResponse) String() string { if this == nil { return "nil" @@ -4748,7 +4881,7 @@ func (m *LokiRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQueryrange @@ -4758,23 +4891,24 @@ func (m *LokiRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthQueryrange } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthQueryrange } if postIndex > l { return io.ErrUnexpectedEOF } - var v github_com_grafana_loki_pkg_querier_plan.QueryPlan - m.Plan = &v + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5003,6 +5137,95 @@ func (m *LokiInstantRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &github_com_grafana_loki_pkg_querier_plan.QueryPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQueryrange(dAtA[iNdEx:]) + if err != nil { + return err 
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthQueryrange
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthQueryrange
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Plan) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowQueryrange
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Plan: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Plan: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+			}
 			var byteLen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
@@ -5028,10 +5251,9 @@ func (m *LokiInstantRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var v github_com_grafana_loki_pkg_querier_plan.QueryPlan
-			m.Plan = &v
-			if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
+			m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
+			if m.Raw == nil {
+				m.Raw = []byte{}
 			}
 			iNdEx = postIndex
 		default:
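// The generated code above turns the plan field from a bare bytes blob into a proper
// Plan message with a single raw field, giving it its own marshal/unmarshal hooks. A
// hedged sketch of round-tripping a LogQL expression through the wrapper (helper names
// are illustrative; the real wiring goes through plan.QueryPlan via the customtype
// option shown in the .proto below):
//
//	func encodePlan(expr syntax.Expr) *Plan {
//		return &Plan{Raw: []byte(expr.String())}
//	}
//
//	func decodePlan(p *Plan) (syntax.Expr, error) {
//		return syntax.ParseExpr(string(p.Raw))
//	}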
diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto
index 8eb43e34ca160..f673464acfc0b 100644
--- a/pkg/querier/queryrange/queryrange.proto
+++ b/pkg/querier/queryrange/queryrange.proto
@@ -18,7 +18,7 @@ option (gogoproto.sizer_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 message LokiRequest {
-  string query = 1;
+  string query = 1; // mark as reserved once we've fully migrated to plan.
   uint32 limit = 2;
   int64 step = 3;
  int64 interval = 9;
@@ -33,7 +33,7 @@ message LokiRequest {
   logproto.Direction direction = 6;
   string path = 7;
   repeated string shards = 8 [(gogoproto.jsontag) = "shards"];
-  bytes plan = 10 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
+  Plan plan = 10 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
 }
 
 message LokiInstantRequest {
@@ -46,7 +46,11 @@ message LokiInstantRequest {
   logproto.Direction direction = 4;
   string path = 5;
   repeated string shards = 6 [(gogoproto.jsontag) = "shards"];
-  bytes plan = 7 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
+  Plan plan = 7 [(gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan"];
+}
+
+message Plan {
+  bytes raw = 1;
 }
 
 message LokiResponse {
diff --git a/pkg/querier/queryrange/queryrangebase/alias.go b/pkg/querier/queryrange/queryrangebase/alias.go
index 01bf345cc48de..4b4e219202f0b 100644
--- a/pkg/querier/queryrange/queryrangebase/alias.go
+++ b/pkg/querier/queryrange/queryrangebase/alias.go
@@ -1,6 +1,9 @@
 package queryrangebase
 
-import "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
+import (
+	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions"
+	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+)
 
 // Helpful aliases for refactoring circular imports
 
@@ -9,5 +12,8 @@ type PrometheusResponseHeader = definitions.PrometheusResponseHeader
 type PrometheusRequestHeader = definitions.PrometheusRequestHeader
 type Codec = definitions.Codec
 type Merger = definitions.Merger
+type CacheGenNumberLoader = resultscache.CacheGenNumberLoader
+
 type Request = definitions.Request
 type Response = definitions.Response
+type Extent = resultscache.Extent
diff --git a/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go b/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
index f9ee4b21059bb..d4eb4fb83b25f 100644
--- a/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
+++ b/pkg/querier/queryrange/queryrangebase/definitions/definitions.pb.go
@@ -25,50 +25,6 @@ var _ = math.Inf
 // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Defined here to prevent circular imports between logproto & queryrangebase -type CachingOptions struct { - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *CachingOptions) Reset() { *m = CachingOptions{} } -func (*CachingOptions) ProtoMessage() {} -func (*CachingOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d1a37772b6ae2c5c, []int{0} -} -func (m *CachingOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CachingOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CachingOptions.Merge(m, src) -} -func (m *CachingOptions) XXX_Size() int { - return m.Size() -} -func (m *CachingOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CachingOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_CachingOptions proto.InternalMessageInfo - -func (m *CachingOptions) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - type PrometheusRequestHeader struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"` Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"` @@ -77,7 +33,7 @@ type PrometheusRequestHeader struct { func (m *PrometheusRequestHeader) Reset() { *m = PrometheusRequestHeader{} } func (*PrometheusRequestHeader) ProtoMessage() {} func (*PrometheusRequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d1a37772b6ae2c5c, []int{1} + return fileDescriptor_d1a37772b6ae2c5c, []int{0} } func (m *PrometheusRequestHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -128,7 +84,7 @@ type PrometheusResponseHeader struct { func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} } func (*PrometheusResponseHeader) ProtoMessage() {} func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d1a37772b6ae2c5c, []int{2} + return fileDescriptor_d1a37772b6ae2c5c, []int{1} } func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -172,7 +128,6 @@ func (m *PrometheusResponseHeader) GetValues() []string { } func init() { - proto.RegisterType((*CachingOptions)(nil), "definitions.CachingOptions") proto.RegisterType((*PrometheusRequestHeader)(nil), "definitions.PrometheusRequestHeader") proto.RegisterType((*PrometheusResponseHeader)(nil), "definitions.PrometheusResponseHeader") } @@ -182,52 +137,26 @@ func init() { } var fileDescriptor_d1a37772b6ae2c5c = []byte{ - // 294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0x31, 0x4e, 0x33, 0x31, - 0x10, 0x85, 0xed, 0xff, 0x87, 0x28, 0x18, 0x89, 0x62, 0x85, 0x44, 0x88, 0xc4, 0x10, 0xa5, 0xa2, - 0x80, 0xb8, 0xe0, 0x06, 0x49, 0x03, 0x0d, 0xa0, 0x80, 0x28, 0xe8, 0xbc, 0xd9, 0x89, 0x63, 0x25, - 0xb1, 0x37, 0xf6, 0x6e, 0x41, 0x05, 0x47, 0xe0, 0x18, 0x1c, 0x85, 0x72, 0xcb, 0x54, 0x88, 0xf5, - 0x36, 0x88, 0x2a, 0x47, 0x40, 0x18, 0x04, 0xdb, 0x22, 0xaa, 0x79, 0xf3, 0xcd, 0x9b, 0x57, 0xcc, - 0xb0, 0x41, 0x3a, 0x95, 0x7c, 0x91, 0xa3, 0x55, 0x68, 0x43, 0xbd, 0xb5, 0x42, 0x4b, 0xac, 0xc9, - 0x58, 0x38, 0xe4, 0x09, 0x8e, 0x95, 0x56, 0x99, 0x32, 0xda, 0xd5, 0x75, 0x2f, 0xb5, 0x26, 
0x33, - 0xd1, 0x66, 0x0d, 0xb5, 0xb7, 0xa5, 0x91, 0x26, 0x70, 0xfe, 0xa1, 0x3e, 0x2d, 0xdd, 0x43, 0xb6, - 0x35, 0x10, 0xa3, 0x89, 0xd2, 0xf2, 0x3c, 0x0d, 0xbe, 0xa8, 0xcd, 0x9a, 0x89, 0x72, 0x22, 0x9e, - 0x61, 0xd2, 0xa2, 0x1d, 0x7a, 0xd0, 0x1c, 0x7e, 0xf7, 0xdd, 0x4b, 0xb6, 0x73, 0x61, 0xcd, 0x1c, - 0xb3, 0x09, 0xe6, 0x6e, 0x88, 0x8b, 0x1c, 0x5d, 0x76, 0x82, 0x22, 0x41, 0x1b, 0xed, 0xb2, 0xb5, - 0x33, 0x31, 0xc7, 0xb0, 0xb2, 0xd1, 0x5f, 0x7f, 0x7b, 0xde, 0xa7, 0x47, 0xc3, 0x80, 0xa2, 0x3d, - 0xd6, 0xb8, 0x16, 0xb3, 0x1c, 0x5d, 0xeb, 0x5f, 0xe7, 0xff, 0xcf, 0xf0, 0x0b, 0x76, 0xaf, 0x58, - 0xab, 0x1e, 0xea, 0x52, 0xa3, 0x1d, 0xfe, 0x35, 0xb5, 0x7f, 0x57, 0x94, 0x40, 0x96, 0x25, 0x90, - 0x55, 0x09, 0xf4, 0xde, 0x03, 0x7d, 0xf4, 0x40, 0x9f, 0x3c, 0xd0, 0xc2, 0x03, 0x7d, 0xf1, 0x40, - 0x5f, 0x3d, 0x90, 0x95, 0x07, 0xfa, 0x50, 0x01, 0x29, 0x2a, 0x20, 0xcb, 0x0a, 0xc8, 0xcd, 0xa9, - 0x54, 0xd9, 0x24, 0x8f, 0x7b, 0x23, 0x33, 0xe7, 0xd2, 0x8a, 0xb1, 0xd0, 0x82, 0xcf, 0xcc, 0x54, - 0xf1, 0x5f, 0xbf, 0x23, 0x6e, 0x84, 0x03, 0x1f, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x09, 0x36, - 0xa9, 0xa5, 0xca, 0x01, 0x00, 0x00, + // 262 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x2e, 0xc8, 0x4e, 0xd7, + 0x2f, 0x2c, 0x4d, 0x2d, 0xca, 0x4c, 0x2d, 0x02, 0xd3, 0x95, 0x45, 0x89, 0x79, 0xe9, 0xa9, 0x48, + 0xcc, 0xa4, 0xc4, 0xe2, 0x54, 0xfd, 0x94, 0xd4, 0xb4, 0xcc, 0xbc, 0xcc, 0x92, 0xcc, 0xfc, 0xbc, + 0x62, 0x64, 0xb6, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x10, 0x37, 0x92, 0x90, 0x94, 0x48, 0x7a, + 0x7e, 0x7a, 0x3e, 0x58, 0x5c, 0x1f, 0xc4, 0x82, 0x28, 0x51, 0x0a, 0xe6, 0x12, 0x0f, 0x28, 0xca, + 0xcf, 0x4d, 0x2d, 0xc9, 0x48, 0x2d, 0x2d, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0xf1, 0x48, + 0x4d, 0x4c, 0x49, 0x2d, 0x12, 0x92, 0xe4, 0x62, 0xf1, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x74, 0x62, 0x7d, 0x75, 0x4f, 0x9e, 0x51, 0x37, 0x08, 0x2c, 0x24, 0x24, 0xcb, 0xc5, + 0x16, 0x96, 0x98, 0x53, 0x9a, 0x5a, 0x2c, 0xc1, 0xa4, 0xc0, 0x8c, 0x90, 0x84, 0x0a, 0x2a, 0x85, + 0x70, 0x49, 0x20, 0x1b, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x4a, 0xa9, 0xa9, 0x4e, 0xf5, 0x17, + 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, + 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, + 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, + 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x3c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, + 0xf3, 0x73, 0xf5, 0xd3, 0x8b, 0x12, 0xd3, 0x12, 0xf3, 0x12, 0xf5, 0x73, 0xf2, 0xb3, 0x33, 0xf5, + 0x49, 0x0e, 0xe0, 0x24, 0x36, 0x70, 0x90, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x51, 0x1b, + 0x61, 0xc9, 0x9c, 0x01, 0x00, 0x00, } -func (this *CachingOptions) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CachingOptions) - if !ok { - that2, ok := that.(CachingOptions) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Disabled != that1.Disabled { - return false - } - return true -} func (this *PrometheusRequestHeader) Equal(that interface{}) bool { if that == nil { return this == nil @@ -292,16 +221,6 @@ func (this *PrometheusResponseHeader) Equal(that interface{}) bool { } return true } -func (this *CachingOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, 
"&definitions.CachingOptions{") - s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} func (this *PrometheusRequestHeader) GoString() string { if this == nil { return "nil" @@ -332,39 +251,6 @@ func valueToGoStringDefinitions(v interface{}, typ string) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func (m *CachingOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *PrometheusRequestHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -454,18 +340,6 @@ func encodeVarintDefinitions(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *CachingOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Disabled { - n += 2 - } - return n -} - func (m *PrometheusRequestHeader) Size() (n int) { if m == nil { return 0 @@ -510,16 +384,6 @@ func sovDefinitions(x uint64) (n int) { func sozDefinitions(x uint64) (n int) { return sovDefinitions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *CachingOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CachingOptions{`, - `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, - `}`, - }, "") - return s -} func (this *PrometheusRequestHeader) String() string { if this == nil { return "nil" @@ -550,79 +414,6 @@ func valueToStringDefinitions(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CachingOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDefinitions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDefinitions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipDefinitions(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDefinitions - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDefinitions - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - 
- if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto b/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto index fdac8a5daa08a..9f5e7967c8524 100644 --- a/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto +++ b/pkg/querier/queryrange/queryrangebase/definitions/definitions.proto @@ -8,11 +8,6 @@ option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangeba option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; -// Defined here to prevent circular imports between logproto & queryrangebase -message CachingOptions { - bool disabled = 1; -} - message PrometheusRequestHeader { string Name = 1 [(gogoproto.jsontag) = "-"]; repeated string Values = 2 [(gogoproto.jsontag) = "-"]; diff --git a/pkg/querier/queryrange/queryrangebase/definitions/interface.go b/pkg/querier/queryrange/queryrangebase/definitions/interface.go index 0f5be9b10676e..f8c9a0f5531fb 100644 --- a/pkg/querier/queryrange/queryrangebase/definitions/interface.go +++ b/pkg/querier/queryrange/queryrangebase/definitions/interface.go @@ -7,6 +7,8 @@ import ( "github.com/gogo/protobuf/proto" "github.com/opentracing/opentracing-go" + + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" ) // Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares. @@ -32,6 +34,7 @@ type Merger interface { // Request represents a query range request that can be process by middlewares. type Request interface { + proto.Message // GetStart returns the start timestamp of the request in milliseconds. GetStart() time.Time // GetEnd returns the end timestamp of the request in milliseconds. @@ -46,11 +49,12 @@ type Request interface { WithStartEnd(start time.Time, end time.Time) Request // WithQuery clone the current request with a different query. WithQuery(string) Request - proto.Message // LogToSpan writes information about this request to an OpenTracing span LogToSpan(opentracing.Span) } +type CachingOptions = resultscache.CachingOptions + // Response represents a query range response. 
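`CachingOptions` above becomes a type alias for the struct that now lives in `resultscache` — the same trick `alias.go` uses for `CacheGenNumberLoader` and `Extent`. An alias (`=`) makes the old and new identifiers denote the same type, so existing `queryrangebase` callers keep compiling without conversions. A minimal sketch of why that matters, with illustrative names standing in for the two packages:

```go
package main

import "fmt"

// Pretend this struct moved into a new owning package (resultscache in
// the real refactor).
type MovedCachingOptions struct{ Disabled bool }

// A type alias: both identifiers are the *same* type, so values flow
// between old and new APIs with no conversion. A named type
// ("type CachingOptions MovedCachingOptions") would not give us that.
type CachingOptions = MovedCachingOptions

func takesNewType(o MovedCachingOptions) bool { return o.Disabled }

func main() {
	old := CachingOptions{Disabled: true}
	fmt.Println(takesNewType(old)) // compiles and prints true: same type
}
```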
type Response interface { proto.Message diff --git a/pkg/querier/queryrange/queryrangebase/middleware.go b/pkg/querier/queryrange/queryrangebase/middleware.go index 8ed3368faf113..10e80ddf8a2ec 100644 --- a/pkg/querier/queryrange/queryrangebase/middleware.go +++ b/pkg/querier/queryrange/queryrangebase/middleware.go @@ -6,6 +6,8 @@ import ( "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/tenant" + + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" ) const ( @@ -13,7 +15,7 @@ const ( ResultsCacheGenNumberHeaderName = "Results-Cache-Gen-Number" ) -func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLoader) middleware.Interface { +func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader resultscache.CacheGenNumberLoader) middleware.Interface { return middleware.Func(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { userIDs, err := tenant.TenantIDs(r.Context()) @@ -30,7 +32,7 @@ func CacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLo }) } -func CacheGenNumberContextSetterMiddleware(cacheGenNumbersLoader CacheGenNumberLoader) Middleware { +func CacheGenNumberContextSetterMiddleware(cacheGenNumbersLoader resultscache.CacheGenNumberLoader) Middleware { return MiddlewareFunc(func(next Handler) Handler { return HandlerFunc(func(ctx context.Context, req Request) (Response, error) { userIDs, err := tenant.TenantIDs(ctx) diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go index 5c76a4a80ee9e..ed2bf48c6757f 100644 --- a/pkg/querier/queryrange/queryrangebase/query_range.go +++ b/pkg/querier/queryrange/queryrangebase/query_range.go @@ -20,6 +20,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util/spanlogger" ) @@ -54,6 +55,12 @@ func (q *PrometheusRequest) WithStartEnd(start, end time.Time) Request { return &clone } +// WithStartEndForCache implements resultscache.Request. +func (q *PrometheusRequest) WithStartEndForCache(s time.Time, e time.Time) resultscache.Request { + clone := q.WithStartEnd(s, e).(resultscache.Request) + return clone +} + // WithQuery clones the current `PrometheusRequest` with a new query. 
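`WithStartEndForCache` above is a one-line adapter: `PrometheusRequest` already knows how to clone itself with new bounds, so satisfying the narrower `resultscache.Request` interface only needs a wrapper that re-types the result. A self-contained sketch of the pattern — the interface and type names here are stand-ins, not Loki's:

```go
package main

import (
	"fmt"
	"time"
)

// CacheReq stands in for resultscache.Request: the narrow interface the
// cache layer needs.
type CacheReq interface {
	WithStartEndForCache(start, end time.Time) CacheReq
	GetStart() time.Time
}

type rangeReq struct{ start, end time.Time }

func (r *rangeReq) GetStart() time.Time { return r.start }

// The wider package already knows how to clone with new bounds...
func (r *rangeReq) withStartEnd(s, e time.Time) *rangeReq {
	clone := *r
	clone.start, clone.end = s, e
	return &clone
}

// ...so satisfying the cache interface is a thin adapter, mirroring
// PrometheusRequest.WithStartEndForCache above.
func (r *rangeReq) WithStartEndForCache(s, e time.Time) CacheReq {
	return r.withStartEnd(s, e)
}

func main() {
	var req CacheReq = &rangeReq{start: time.Unix(0, 0), end: time.Unix(3600, 0)}
	shifted := req.WithStartEndForCache(time.Unix(600, 0), time.Unix(1200, 0))
	fmt.Println(shifted.GetStart().Unix()) // 600
}
```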
func (q *PrometheusRequest) WithQuery(query string) Request { clone := *q diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go index 121b3ffb15351..f908b3621dcf6 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go @@ -7,12 +7,13 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" _ "github.com/golang/protobuf/ptypes/duration" github_com_grafana_loki_pkg_logproto "github.com/grafana/loki/pkg/logproto" logproto "github.com/grafana/loki/pkg/logproto" definitions "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions" + resultscache "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" io "io" math "math" math_bits "math/bits" @@ -40,7 +41,7 @@ type PrometheusRequest struct { Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"` Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` - CachingOptions definitions.CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"` + CachingOptions resultscache.CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"` Headers []*definitions.PrometheusRequestHeader `protobuf:"bytes,8,rep,name=Headers,proto3" json:"-"` } @@ -118,11 +119,11 @@ func (m *PrometheusRequest) GetQuery() string { return "" } -func (m *PrometheusRequest) GetCachingOptions() definitions.CachingOptions { +func (m *PrometheusRequest) GetCachingOptions() resultscache.CachingOptions { if m != nil { return m.CachingOptions } - return definitions.CachingOptions{} + return resultscache.CachingOptions{} } func (m *PrometheusRequest) GetHeaders() []*definitions.PrometheusRequestHeader { @@ -302,132 +303,11 @@ func (m *SampleStream) GetSamples() []logproto.LegacySample { return nil } -type CachedResponse struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` - // List of cached responses; non-overlapping and in order. 
- Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"` -} - -func (m *CachedResponse) Reset() { *m = CachedResponse{} } -func (*CachedResponse) ProtoMessage() {} -func (*CachedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4cc6a0c1d6b614c4, []int{4} -} -func (m *CachedResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CachedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CachedResponse.Merge(m, src) -} -func (m *CachedResponse) XXX_Size() int { - return m.Size() -} -func (m *CachedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CachedResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CachedResponse proto.InternalMessageInfo - -func (m *CachedResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *CachedResponse) GetExtents() []Extent { - if m != nil { - return m.Extents - } - return nil -} - -type Extent struct { - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` - TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"` - Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"` -} - -func (m *Extent) Reset() { *m = Extent{} } -func (*Extent) ProtoMessage() {} -func (*Extent) Descriptor() ([]byte, []int) { - return fileDescriptor_4cc6a0c1d6b614c4, []int{5} -} -func (m *Extent) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Extent.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Extent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Extent.Merge(m, src) -} -func (m *Extent) XXX_Size() int { - return m.Size() -} -func (m *Extent) XXX_DiscardUnknown() { - xxx_messageInfo_Extent.DiscardUnknown(m) -} - -var xxx_messageInfo_Extent proto.InternalMessageInfo - -func (m *Extent) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *Extent) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *Extent) GetTraceId() string { - if m != nil { - return m.TraceId - } - return "" -} - -func (m *Extent) GetResponse() *types.Any { - if m != nil { - return m.Response - } - return nil -} - func init() { proto.RegisterType((*PrometheusRequest)(nil), "queryrangebase.PrometheusRequest") proto.RegisterType((*PrometheusResponse)(nil), "queryrangebase.PrometheusResponse") proto.RegisterType((*PrometheusData)(nil), "queryrangebase.PrometheusData") proto.RegisterType((*SampleStream)(nil), "queryrangebase.SampleStream") - proto.RegisterType((*CachedResponse)(nil), "queryrangebase.CachedResponse") - proto.RegisterType((*Extent)(nil), "queryrangebase.Extent") } func init() { @@ -435,60 +315,54 @@ func init() { } var fileDescriptor_4cc6a0c1d6b614c4 = []byte{ - // 846 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xdc, 0x44, - 0x14, 0x5f, 0xc7, 0xfb, 0x77, 0x5a, 0x6d, 0x61, 0x1a, 
0x15, 0x27, 0x45, 0xf6, 0x6a, 0x05, 0x52, - 0x90, 0xc0, 0x2b, 0x8a, 0xe8, 0x01, 0xa9, 0x88, 0x38, 0x09, 0xa2, 0x55, 0x25, 0x2a, 0xa7, 0x27, - 0x2e, 0x68, 0x76, 0xfd, 0xe2, 0x58, 0xf1, 0xbf, 0xce, 0x8c, 0x2b, 0xf6, 0xc6, 0x89, 0x73, 0x4f, - 0x88, 0x8f, 0xc0, 0x01, 0xf1, 0x39, 0x72, 0xcc, 0xb1, 0xe2, 0x60, 0xc8, 0xe6, 0x82, 0x7c, 0xea, - 0x47, 0x40, 0xf3, 0xc7, 0xbb, 0xde, 0x8d, 0x02, 0xf4, 0xb2, 0xfb, 0x66, 0xde, 0xef, 0xbd, 0xf7, - 0x7b, 0xbf, 0x79, 0x7e, 0xe8, 0x61, 0x7e, 0x16, 0x4e, 0x5e, 0x14, 0x40, 0x23, 0xa0, 0xf2, 0x7f, - 0x4e, 0x49, 0x1a, 0x42, 0xc3, 0x9c, 0x12, 0xd6, 0x3c, 0xba, 0x39, 0xcd, 0x78, 0x86, 0x87, 0xeb, - 0x80, 0xdd, 0xed, 0x30, 0x0b, 0x33, 0xe9, 0x9a, 0x08, 0x4b, 0xa1, 0x76, 0x77, 0xc2, 0x2c, 0x0b, - 0x63, 0x98, 0xc8, 0xd3, 0xb4, 0x38, 0x99, 0x90, 0x74, 0xae, 0x5d, 0xf6, 0xa6, 0x2b, 0x28, 0x28, - 0xe1, 0x51, 0x96, 0x6a, 0xbf, 0xb3, 0xe9, 0xe7, 0x51, 0x02, 0x8c, 0x93, 0x24, 0xd7, 0x80, 0xfb, - 0x82, 0x79, 0x9c, 0x85, 0xaa, 0x68, 0x6d, 0x68, 0xe7, 0xc1, 0xff, 0x6b, 0x2b, 0x80, 0x93, 0x28, - 0x8d, 0x44, 0x55, 0xd6, 0xb4, 0x55, 0x92, 0xf1, 0xcf, 0x26, 0x7a, 0xf7, 0x19, 0xcd, 0x12, 0xe0, - 0xa7, 0x50, 0x30, 0x1f, 0x5e, 0x14, 0xc0, 0x38, 0xc6, 0xa8, 0x9d, 0x13, 0x7e, 0x6a, 0x19, 0x23, - 0x63, 0x6f, 0xe0, 0x4b, 0x1b, 0x7f, 0x81, 0x3a, 0x8c, 0x13, 0xca, 0xad, 0xad, 0x91, 0xb1, 0x77, - 0xeb, 0xc1, 0xae, 0xab, 0xc8, 0xbb, 0x35, 0x79, 0xf7, 0x79, 0x4d, 0xde, 0xeb, 0x9f, 0x97, 0x4e, - 0xeb, 0xd5, 0x9f, 0x8e, 0xe1, 0xab, 0x10, 0xfc, 0x10, 0x99, 0x90, 0x06, 0x96, 0xf9, 0x16, 0x91, - 0x22, 0x40, 0xf0, 0x60, 0x1c, 0x72, 0xab, 0x3d, 0x32, 0xf6, 0x4c, 0x5f, 0xda, 0xf8, 0x11, 0xea, - 0x09, 0x99, 0xb2, 0x82, 0x5b, 0x1d, 0x99, 0x6f, 0xe7, 0x5a, 0xbe, 0x43, 0x2d, 0xb3, 0x4a, 0xf7, - 0x8b, 0x48, 0x57, 0xc7, 0xe0, 0x6d, 0xd4, 0x91, 0x02, 0x59, 0x5d, 0xd9, 0x9b, 0x3a, 0xe0, 0xc7, - 0x68, 0x38, 0x23, 0xb3, 0xd3, 0x28, 0x0d, 0xbf, 0xcd, 0xa5, 0x3c, 0x56, 0x4f, 0xe6, 0xbe, 0xef, - 0x36, 0x25, 0x3b, 0x58, 0x83, 0x78, 0x6d, 0x91, 0xdd, 0xdf, 0x08, 0xc4, 0x47, 0xa8, 0xf7, 0x0d, - 0x90, 0x00, 0x28, 0xb3, 0xfa, 0x23, 0x73, 0xef, 0xd6, 0x83, 0x0f, 0xd6, 0x72, 0x5c, 0x13, 0x5b, - 0x81, 0xbd, 0x4e, 0x55, 0x3a, 0xc6, 0x27, 0x7e, 0x1d, 0x3b, 0xfe, 0x7d, 0x0b, 0xe1, 0x26, 0x96, - 0xe5, 0x59, 0xca, 0x00, 0x8f, 0x51, 0xf7, 0x98, 0x13, 0x5e, 0x30, 0xf5, 0x36, 0x1e, 0xaa, 0x4a, - 0xa7, 0xcb, 0xe4, 0x8d, 0xaf, 0x3d, 0xf8, 0x09, 0x6a, 0x1f, 0x12, 0x4e, 0xf4, 0x43, 0xd9, 0xee, - 0xfa, 0x40, 0x34, 0x18, 0x08, 0x94, 0x77, 0x4f, 0x74, 0x51, 0x95, 0xce, 0x30, 0x20, 0x9c, 0x7c, - 0x9c, 0x25, 0x11, 0x87, 0x24, 0xe7, 0x73, 0x5f, 0xe6, 0xc0, 0x9f, 0xa3, 0xc1, 0x11, 0xa5, 0x19, - 0x7d, 0x3e, 0xcf, 0x41, 0xbe, 0xdf, 0xc0, 0x7b, 0xaf, 0x2a, 0x9d, 0xbb, 0x50, 0x5f, 0x36, 0x22, - 0x56, 0x48, 0xfc, 0x11, 0xea, 0xc8, 0x83, 0x7c, 0xb9, 0x81, 0x77, 0xb7, 0x2a, 0x9d, 0x3b, 0x32, - 0xa4, 0x01, 0x57, 0x08, 0xfc, 0xf5, 0x4a, 0xaf, 0x8e, 0xd4, 0xeb, 0xc3, 0x1b, 0xf5, 0x52, 0x1a, - 0xdc, 0x20, 0xd8, 0x4f, 0x06, 0x1a, 0xae, 0xb7, 0x86, 0x5d, 0x84, 0x7c, 0x60, 0x45, 0xcc, 0x25, - 0x7b, 0x25, 0xd8, 0xb0, 0x2a, 0x1d, 0x44, 0x97, 0xb7, 0x7e, 0x03, 0x81, 0x0f, 0x51, 0x57, 0x9d, - 0xac, 0x2d, 0xc9, 0xe4, 0xfd, 0x4d, 0xe9, 0x8e, 0x49, 0x92, 0xc7, 0x70, 0xcc, 0x29, 0x90, 0xc4, - 0x1b, 0x6a, 0xe1, 0xba, 0x2a, 0x9b, 0xaf, 0x63, 0xc7, 0xe7, 0x06, 0xba, 0xdd, 0x04, 0xe2, 0x97, - 0xa8, 0x1b, 0x93, 0x29, 0xc4, 0xe2, 0xcd, 0x4c, 0x39, 0xb0, 0xcb, 0x2f, 0xf9, 0x29, 0x84, 0x64, - 0x36, 0x7f, 0x2a, 0xbc, 0xcf, 0x48, 0x44, 0xbd, 0x03, 0x91, 0xf3, 0x8f, 0xd2, 0xf9, 0x34, 0x8c, - 0xf8, 0x69, 0x31, 0x75, 0x67, 0x59, 0x32, 0x09, 0x29, 0x39, 0x21, 0x29, 0x99, 
0xc4, 0xd9, 0x59, - 0x34, 0x69, 0x2e, 0x04, 0x57, 0xc6, 0xed, 0x07, 0x24, 0xe7, 0x40, 0x05, 0x91, 0x04, 0x38, 0x8d, - 0x66, 0xbe, 0xae, 0x86, 0xbf, 0x42, 0x3d, 0x26, 0x79, 0x30, 0xdd, 0xcf, 0xbd, 0xcd, 0xc2, 0x8a, - 0xe6, 0xaa, 0x93, 0x97, 0x24, 0x2e, 0x80, 0xf9, 0x75, 0xd8, 0x38, 0x45, 0x43, 0x31, 0xf3, 0x10, - 0x2c, 0xe7, 0x6f, 0x07, 0x99, 0x67, 0x30, 0xd7, 0x5a, 0xf6, 0xaa, 0xd2, 0x11, 0x47, 0x5f, 0xfc, - 0xe0, 0x7d, 0xd4, 0x83, 0x1f, 0x38, 0xa4, 0x7c, 0x55, 0x6e, 0x43, 0xbe, 0x23, 0xe9, 0xf6, 0xee, - 0xe8, 0x72, 0x35, 0xdc, 0xaf, 0x8d, 0xf1, 0x6f, 0x06, 0xea, 0x2a, 0x10, 0x76, 0xea, 0x75, 0x23, - 0x4a, 0x99, 0xde, 0xa0, 0x2a, 0x1d, 0x75, 0x51, 0xef, 0x94, 0x1d, 0xb5, 0x53, 0xb6, 0xa4, 0x5b, - 0x32, 0x81, 0x34, 0x50, 0x6b, 0x63, 0x84, 0xfa, 0x9c, 0x92, 0x19, 0x7c, 0x1f, 0x05, 0x7a, 0x00, - 0xeb, 0x61, 0x91, 0xd7, 0x8f, 0x03, 0xfc, 0x25, 0xea, 0x53, 0xdd, 0x92, 0xde, 0x22, 0xdb, 0xd7, - 0xb6, 0xc8, 0x7e, 0x3a, 0xf7, 0x6e, 0x57, 0xa5, 0xb3, 0x44, 0xfa, 0x4b, 0xeb, 0x49, 0xbb, 0x6f, - 0xbe, 0xd3, 0xf6, 0xd8, 0xc5, 0xa5, 0xdd, 0x7a, 0x7d, 0x69, 0xb7, 0xde, 0x5c, 0xda, 0xc6, 0x8f, - 0x0b, 0xdb, 0xf8, 0x75, 0x61, 0x1b, 0xe7, 0x0b, 0xdb, 0xb8, 0x58, 0xd8, 0xc6, 0x5f, 0x0b, 0xdb, - 0xf8, 0x7b, 0x61, 0xb7, 0xde, 0x2c, 0x6c, 0xe3, 0xd5, 0x95, 0xdd, 0xba, 0xb8, 0xb2, 0x5b, 0xaf, - 0xaf, 0xec, 0xd6, 0x77, 0x8f, 0xfe, 0xed, 0x6d, 0xff, 0x73, 0x9f, 0x4f, 0xbb, 0x92, 0xe0, 0x67, - 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63, 0x5c, 0x0b, 0x88, 0xd6, 0x06, 0x00, 0x00, + // 739 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4f, 0xdb, 0x48, + 0x18, 0x8d, 0xc9, 0x2f, 0x32, 0xac, 0xb2, 0xda, 0x01, 0xb1, 0x5e, 0x16, 0x8d, 0xa3, 0x68, 0x57, + 0xca, 0x4a, 0x5b, 0xbb, 0xa5, 0x2a, 0x87, 0x4a, 0x48, 0xad, 0x81, 0xaa, 0x42, 0x48, 0x45, 0x86, + 0x53, 0x6f, 0x93, 0x64, 0x70, 0x2c, 0x6c, 0x8f, 0x99, 0x19, 0x23, 0xe5, 0xd6, 0x53, 0xcf, 0xdc, + 0xda, 0x3f, 0xa1, 0xa7, 0xfe, 0x1d, 0x1c, 0x39, 0xa2, 0x1e, 0xdc, 0x12, 0x2e, 0x55, 0x4e, 0xfc, + 0x09, 0xd5, 0xcc, 0x38, 0xe0, 0x04, 0xd1, 0x1f, 0xa7, 0xcc, 0xe7, 0xef, 0xbd, 0x37, 0xef, 0x7b, + 0x9f, 0x63, 0xb0, 0x9e, 0x1c, 0xf9, 0xce, 0x71, 0x4a, 0x58, 0x40, 0x98, 0xfa, 0x1d, 0x32, 0x1c, + 0xfb, 0xa4, 0x70, 0xec, 0x62, 0x5e, 0x2c, 0xed, 0x84, 0x51, 0x41, 0x61, 0x73, 0x1a, 0xb0, 0xb2, + 0xe4, 0x53, 0x9f, 0xaa, 0x96, 0x23, 0x4f, 0x1a, 0xb5, 0x82, 0x7c, 0x4a, 0xfd, 0x90, 0x38, 0xaa, + 0xea, 0xa6, 0x87, 0x4e, 0x3f, 0x65, 0x58, 0x04, 0x34, 0xce, 0xfb, 0xd6, 0x6c, 0x5f, 0x04, 0x11, + 0xe1, 0x02, 0x47, 0x49, 0x0e, 0xf8, 0x5b, 0xda, 0x0b, 0xa9, 0xaf, 0x95, 0x27, 0x87, 0xbc, 0xb9, + 0xf9, 0x73, 0xde, 0xfb, 0xe4, 0x30, 0x88, 0x03, 0x79, 0x2b, 0x2f, 0x9e, 0x73, 0x91, 0x87, 0x52, + 0x84, 0x0b, 0xca, 0xb0, 0x4f, 0x9c, 0xde, 0x20, 0x8d, 0x8f, 0x9c, 0x1e, 0xee, 0x0d, 0x88, 0xc3, + 0x08, 0x4f, 0x43, 0xc1, 0x75, 0x21, 0x86, 0x09, 0xc9, 0x19, 0xed, 0x77, 0x65, 0xf0, 0xc7, 0x1e, + 0xa3, 0x11, 0x11, 0x03, 0x92, 0x72, 0x8f, 0x1c, 0xa7, 0x84, 0x0b, 0x08, 0x41, 0x25, 0xc1, 0x62, + 0x60, 0x1a, 0x2d, 0xa3, 0xd3, 0xf0, 0xd4, 0x19, 0x3e, 0x05, 0x55, 0x2e, 0x30, 0x13, 0xe6, 0x5c, + 0xcb, 0xe8, 0x2c, 0xac, 0xad, 0xd8, 0x7a, 0x5c, 0x7b, 0x32, 0xae, 0x7d, 0x30, 0x19, 0xd7, 0x9d, + 0x3f, 0xcb, 0xac, 0xd2, 0xe9, 0x67, 0xcb, 0xf0, 0x34, 0x05, 0xae, 0x83, 0x32, 0x89, 0xfb, 0x66, + 0xf9, 0x17, 0x98, 0x92, 0x20, 0x7d, 0x70, 0x41, 0x12, 0xb3, 0xd2, 0x32, 0x3a, 0x65, 0x4f, 0x9d, + 0xe1, 0x06, 0xa8, 0xcb, 0x60, 0x69, 0x2a, 0xcc, 0xaa, 0xd2, 0xfb, 0xeb, 0x8e, 0xde, 0x56, 0xbe, + 0x18, 0x2d, 0xf7, 0x5e, 0xca, 0x4d, 0x38, 0x70, 0x09, 0x54, 0x55, 
0xa4, 0x66, 0x4d, 0xcd, 0xa6, + 0x0b, 0xb8, 0x03, 0x9a, 0x32, 0x9b, 0x20, 0xf6, 0x5f, 0x25, 0x2a, 0x50, 0xb3, 0xae, 0xb4, 0x57, + 0xed, 0x62, 0x72, 0xf6, 0xe6, 0x14, 0xc6, 0xad, 0x48, 0x79, 0x6f, 0x86, 0x09, 0xb7, 0x41, 0xfd, + 0x25, 0xc1, 0x7d, 0xc2, 0xb8, 0x39, 0xdf, 0x2a, 0x77, 0x16, 0xd6, 0xfe, 0xb1, 0x8b, 0x9b, 0xba, + 0x93, 0xb6, 0x06, 0xbb, 0xd5, 0x71, 0x66, 0x19, 0x0f, 0xbc, 0x09, 0xb7, 0xfd, 0x71, 0x0e, 0xc0, + 0x22, 0x96, 0x27, 0x34, 0xe6, 0x04, 0xb6, 0x41, 0x6d, 0x5f, 0x60, 0x91, 0x72, 0xbd, 0x1c, 0x17, + 0x8c, 0x33, 0xab, 0xc6, 0xd5, 0x13, 0x2f, 0xef, 0xc0, 0x1d, 0x50, 0xd9, 0xc2, 0x02, 0xe7, 0x9b, + 0x42, 0xf6, 0xf4, 0x3b, 0x54, 0x70, 0x20, 0x51, 0xee, 0xb2, 0x9c, 0x62, 0x9c, 0x59, 0xcd, 0x3e, + 0x16, 0xf8, 0x7f, 0x1a, 0x05, 0x82, 0x44, 0x89, 0x18, 0x7a, 0x4a, 0x03, 0x3e, 0x01, 0x8d, 0x6d, + 0xc6, 0x28, 0x3b, 0x18, 0x26, 0x44, 0x2d, 0xb0, 0xe1, 0xfe, 0x39, 0xce, 0xac, 0x45, 0x32, 0x79, + 0x58, 0x60, 0xdc, 0x22, 0xe1, 0x7f, 0xa0, 0xaa, 0x0a, 0xb5, 0xba, 0x86, 0xbb, 0x38, 0xce, 0xac, + 0xdf, 0x15, 0xa5, 0x00, 0xd7, 0x08, 0xf8, 0xe2, 0x36, 0xaf, 0xaa, 0xca, 0xeb, 0xdf, 0x7b, 0xf3, + 0xd2, 0x19, 0xdc, 0x13, 0xd8, 0x5b, 0x03, 0x34, 0xa7, 0x47, 0x83, 0x36, 0x00, 0x9e, 0xda, 0x9f, + 0x72, 0xaf, 0x03, 0x6b, 0x8e, 0x33, 0x0b, 0xb0, 0x9b, 0xa7, 0x5e, 0x01, 0x01, 0xb7, 0x40, 0x4d, + 0x57, 0xe6, 0x9c, 0x72, 0xb2, 0x3a, 0x1b, 0xdd, 0x3e, 0x8e, 0x92, 0x90, 0xec, 0x0b, 0x46, 0x70, + 0xe4, 0x36, 0xf3, 0xe0, 0x6a, 0x5a, 0xcd, 0xcb, 0xb9, 0xed, 0x33, 0x03, 0xfc, 0x56, 0x04, 0xc2, + 0x13, 0x50, 0x0b, 0x71, 0x97, 0x84, 0x72, 0x67, 0x65, 0xf5, 0xc6, 0xde, 0xfc, 0xf9, 0x77, 0x89, + 0x8f, 0x7b, 0xc3, 0x5d, 0xd9, 0xdd, 0xc3, 0x01, 0x73, 0x37, 0xa5, 0xe6, 0xa7, 0xcc, 0x7a, 0xe4, + 0x07, 0x62, 0x90, 0x76, 0xed, 0x1e, 0x8d, 0x1c, 0x9f, 0xe1, 0x43, 0x1c, 0x63, 0x27, 0xa4, 0x47, + 0x81, 0x53, 0xfc, 0x86, 0xd8, 0x8a, 0xf7, 0xbc, 0x8f, 0x13, 0x41, 0x98, 0x34, 0x12, 0x11, 0xc1, + 0x82, 0x9e, 0x97, 0xdf, 0x06, 0x9f, 0x81, 0x3a, 0x57, 0x3e, 0x78, 0x3e, 0xcf, 0xf2, 0xec, 0xc5, + 0xda, 0xe6, 0xed, 0x24, 0x27, 0x38, 0x4c, 0x09, 0xf7, 0x26, 0x34, 0x97, 0x9f, 0x5f, 0xa2, 0xd2, + 0xc5, 0x25, 0x2a, 0x5d, 0x5f, 0x22, 0xe3, 0xcd, 0x08, 0x19, 0x1f, 0x46, 0xc8, 0x38, 0x1b, 0x21, + 0xe3, 0x7c, 0x84, 0x8c, 0x2f, 0x23, 0x64, 0x7c, 0x1d, 0xa1, 0xd2, 0xf5, 0x08, 0x19, 0xa7, 0x57, + 0xa8, 0x74, 0x7e, 0x85, 0x4a, 0x17, 0x57, 0xa8, 0xf4, 0x7a, 0xe3, 0x7b, 0xe6, 0x7f, 0xf8, 0x8d, + 0xeb, 0xd6, 0x94, 0xc3, 0xc7, 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfe, 0xcd, 0xe4, 0x4f, 0xcf, + 0x05, 0x00, 0x00, } func (this *PrometheusRequest) Equal(that interface{}) bool { @@ -651,71 +525,6 @@ func (this *SampleStream) Equal(that interface{}) bool { } return true } -func (this *CachedResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CachedResponse) - if !ok { - that2, ok := that.(CachedResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - return false - } - if len(this.Extents) != len(that1.Extents) { - return false - } - for i := range this.Extents { - if !this.Extents[i].Equal(&that1.Extents[i]) { - return false - } - } - return true -} -func (this *Extent) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Extent) - if !ok { - that2, ok := that.(Extent) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if 
this.Start != that1.Start { - return false - } - if this.End != that1.End { - return false - } - if this.TraceId != that1.TraceId { - return false - } - if !this.Response.Equal(that1.Response) { - return false - } - return true -} func (this *PrometheusRequest) GoString() string { if this == nil { return "nil" @@ -785,38 +594,6 @@ func (this *SampleStream) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *CachedResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrangebase.CachedResponse{") - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - if this.Extents != nil { - vs := make([]*Extent, len(this.Extents)) - for i := range vs { - vs[i] = &this.Extents[i] - } - s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Extent) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&queryrangebase.Extent{") - s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") - s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") - s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") - if this.Response != nil { - s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} func valueToGoStringQueryrange(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1078,102 +855,6 @@ func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *CachedResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Extents) > 0 { - for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Extent) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Extent) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.TraceId) > 0 { - i -= len(m.TraceId) - copy(dAtA[i:], m.TraceId) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TraceId))) - i-- - dAtA[i] = 0x22 - } - if m.End != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x10 - } - if m.Start != 0 { - i = 
encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int { offset -= sovQueryrange(v) base := offset @@ -1288,48 +969,6 @@ func (m *SampleStream) Size() (n int) { return n } -func (m *CachedResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Extents) > 0 { - for _, e := range m.Extents { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *Extent) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovQueryrange(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovQueryrange(uint64(m.End)) - } - l = len(m.TraceId) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - func sovQueryrange(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1352,7 +991,7 @@ func (this *PrometheusRequest) String() string { `Step:` + fmt.Sprintf("%v", this.Step) + `,`, `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, - `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "definitions.CachingOptions", 1), `&`, ``, 1) + `,`, + `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`, `Headers:` + repeatedStringForHeaders + `,`, `}`, }, "") @@ -1409,35 +1048,6 @@ func (this *SampleStream) String() string { }, "") return s } -func (this *CachedResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForExtents := "[]Extent{" - for _, f := range this.Extents { - repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + "," - } - repeatedStringForExtents += "}" - s := strings.Join([]string{`&CachedResponse{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Extents:` + repeatedStringForExtents + `,`, - `}`, - }, "") - return s -} -func (this *Extent) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Extent{`, - `Start:` + fmt.Sprintf("%v", this.Start) + `,`, - `End:` + fmt.Sprintf("%v", this.End) + `,`, - `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`, - `}`, - }, "") - return s -} func valueToStringQueryrange(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2204,284 +1814,6 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { } return nil } -func (m *CachedResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Extents = append(m.Extents, Extent{}) - if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Extent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Extent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &types.Any{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipQueryrange(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.proto b/pkg/querier/queryrange/queryrangebase/queryrange.proto index ad66551d2bb11..98ddaa2b7d2db 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.proto +++ b/pkg/querier/queryrange/queryrangebase/queryrange.proto @@ -3,11 +3,11 @@ syntax = "proto3"; package queryrangebase; import "gogoproto/gogo.proto"; -import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "pkg/logproto/logproto.proto"; import "pkg/querier/queryrange/queryrangebase/definitions/definitions.proto"; +import "pkg/storage/chunk/cache/resultscache/types.proto"; option go_package = "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"; option (gogoproto.marshaler_all) = true; @@ -29,7 +29,7 @@ message PrometheusRequest { (gogoproto.nullable) = false ]; string query = 6; - definitions.CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; + resultscache.CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; repeated definitions.PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"]; } @@ -63,22 +63,3 @@ message SampleStream { (gogoproto.jsontag) = "values" ]; } - -message CachedResponse { - string key = 1 [(gogoproto.jsontag) = "key"]; - - // List of cached responses; non-overlapping and in order. 
- repeated Extent extents = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "extents" - ]; -} - -message Extent { - int64 start = 1 [(gogoproto.jsontag) = "start"]; - int64 end = 2 [(gogoproto.jsontag) = "end"]; - // reserved the previous key to ensure cache transition - reserved 3; - string trace_id = 4 [(gogoproto.jsontag) = "-"]; - google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"]; -} diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go index 1e54b55859402..097dc264d32a0 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache.go @@ -4,35 +4,21 @@ import ( "context" "flag" "fmt" - "net/http" - "sort" "strings" - "time" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" - otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" - "github.com/uber/jaeger-client-go" - - "github.com/grafana/dskit/tenant" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util/constants" - "github.com/grafana/loki/pkg/util/math" - "github.com/grafana/loki/pkg/util/spanlogger" - "github.com/grafana/loki/pkg/util/validation" ) var ( @@ -65,20 +51,9 @@ func NewResultsCacheMetrics(registerer prometheus.Registerer) *ResultsCacheMetri } } -type CacheGenNumberLoader interface { - GetResultsCacheGenNumber(tenantIDs []string) string - Stop() -} - // ResultsCacheConfig is the config for the results cache. type ResultsCacheConfig struct { - CacheConfig cache.Config `yaml:"cache"` - Compression string `yaml:"compression"` -} - -func (cfg *ResultsCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { - cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f) - f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.") + resultscache.Config `yaml:",inline"` } // RegisterFlags registers flags. @@ -86,22 +61,9 @@ func (cfg *ResultsCacheConfig) RegisterFlags(f *flag.FlagSet) { cfg.RegisterFlagsWithPrefix(f, "frontend.") } -func (cfg *ResultsCacheConfig) Validate() error { - switch cfg.Compression { - case "snappy", "": - // valid - default: - return errors.Errorf("unsupported compression type: %s", cfg.Compression) - } - - return nil -} - // Extractor is used by the cache to extract a subset of a response from a cache entry. type Extractor interface { - // Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds - // in the `res` response which spans from `resStart` to `resEnd`. - Extract(start, end int64, res Response, resStart, resEnd int64) Response + resultscache.Extractor ResponseWithoutHeaders(resp Response) Response } @@ -109,7 +71,7 @@ type Extractor interface { type PrometheusResponseExtractor struct{} // Extract extracts response for specific a range from a response. 
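One effect of the config hunk above: `ResultsCacheConfig` now embeds `resultscache.Config` with `yaml:",inline"`, which keeps previously flat YAML keys such as `compression` decoding to the same place after the move. A quick sketch with illustrative types, assuming `gopkg.in/yaml.v2` (the library the inline tag targets in this sketch):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Inner plays the role of resultscache.Config after the move.
type Inner struct {
	Compression string `yaml:"compression"`
}

// Outer embeds Inner with ",inline", so the embedded struct's keys stay
// at the same nesting level instead of gaining a new sub-section.
type Outer struct {
	Inner `yaml:",inline"`
}

func main() {
	var cfg Outer
	if err := yaml.Unmarshal([]byte("compression: snappy\n"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Compression) // snappy
}
```

Without `,inline`, existing configs would have needed a breaking `cache:`-style sub-key, so the tag is what makes this refactor invisible to operators.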
-func (PrometheusResponseExtractor) Extract(start, end int64, res Response, _, _ int64) Response { +func (PrometheusResponseExtractor) Extract(start, end int64, res resultscache.Response, _, _ int64) resultscache.Response { promRes := res.(*PrometheusResponse) return &PrometheusResponse{ Status: StatusSuccess, @@ -134,39 +96,17 @@ func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp Response) Respons } } -// CacheSplitter generates cache keys. This is a useful interface for downstream -// consumers who wish to implement their own strategies. -type CacheSplitter interface { - GenerateCacheKey(ctx context.Context, userID string, r Request) string -} - -// constSplitter is a utility for using a constant split interval when determining cache keys -type constSplitter time.Duration - -// GenerateCacheKey generates a cache key based on the userID, Request and interval. -func (t constSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string { - currentInterval := r.GetStart().UnixMilli() / int64(time.Duration(t)/time.Millisecond) - return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval) -} - // ShouldCacheFn checks whether the current request should go to cache // or not. If not, just send the request to next handler. type ShouldCacheFn func(ctx context.Context, r Request) bool +// ParallelismForReqFn returns the parallelism for a given request. +type ParallelismForReqFn func(ctx context.Context, tenantIDs []string, r Request) int + type resultsCache struct { - logger log.Logger - next Handler - cache cache.Cache - limits Limits - splitter CacheSplitter - - extractor Extractor - minCacheExtent int64 // discard any cache extent smaller than this - merger Merger - cacheGenNumberLoader CacheGenNumberLoader - shouldCache ShouldCacheFn - parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int - retentionEnabled bool + cache *resultscache.ResultsCache + logger log.Logger + cacheGenNumberLoader resultscache.CacheGenNumberLoader metrics *ResultsCacheMetrics } @@ -179,13 +119,13 @@ type resultsCache struct { func NewResultsCacheMiddleware( logger log.Logger, c cache.Cache, - splitter CacheSplitter, + keygen resultscache.KeyGenerator, limits Limits, merger Merger, extractor Extractor, - cacheGenNumberLoader CacheGenNumberLoader, + cacheGenNumberLoader resultscache.CacheGenNumberLoader, shouldCache ShouldCacheFn, - parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int, + parallelismForReq ParallelismForReqFn, retentionEnabled bool, metrics *ResultsCacheMetrics, ) (Middleware, error) { @@ -193,78 +133,63 @@ func NewResultsCacheMiddleware( c = cache.NewCacheGenNumMiddleware(c) } + out := &resultsCache{ + logger: logger, + cacheGenNumberLoader: cacheGenNumberLoader, + metrics: metrics, + } + return MiddlewareFunc(func(next Handler) Handler { - return &resultsCache{ - logger: logger, - next: next, - cache: c, - limits: limits, - merger: merger, - extractor: extractor, - minCacheExtent: (5 * time.Minute).Milliseconds(), - splitter: splitter, - cacheGenNumberLoader: cacheGenNumberLoader, - shouldCache: shouldCache, - parallelismForReq: parallelismForReq, - retentionEnabled: retentionEnabled, - metrics: metrics, + nextCacheWrapper := resultscache.HandlerFunc(func(ctx context.Context, req resultscache.Request) (resultscache.Response, error) { + return next.Do(ctx, req.(Request)) + }) + + shouldCacheReqWrapper := func(ctx context.Context, req resultscache.Request) bool { + if shouldCache == nil { + return true 
+ } + return shouldCache(ctx, req.(Request)) + } + + shouldCacheResWrapper := func(ctx context.Context, req resultscache.Request, res resultscache.Response, maxCacheTime int64) bool { + return out.shouldCacheResponse(ctx, req.(Request), res.(Response), maxCacheTime) } + + parallelismForReqWrapper := func(ctx context.Context, tenantIDs []string, req resultscache.Request) int { + return parallelismForReq(ctx, tenantIDs, req.(Request)) + } + + out.cache = resultscache.NewResultsCache( + logger, + c, + nextCacheWrapper, + keygen, + limits, + FromQueryResponseMergerToCacheResponseMerger(merger), + extractor, + shouldCacheReqWrapper, + shouldCacheResWrapper, + parallelismForReqWrapper, + cacheGenNumberLoader, + retentionEnabled, + ) + + return out }), nil } func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { - sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do") - defer sp.Finish() - tenantIDs, err := tenant.TenantIDs(ctx) + res, err := s.cache.Do(ctx, r.(resultscache.Request)) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - if s.shouldCache != nil && !s.shouldCache(ctx, r) { - return s.next.Do(ctx, r) - } - - if s.cacheGenNumberLoader != nil && s.retentionEnabled { - ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs)) - } - - var ( - key = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r) - extents []Extent - response Response - ) - - sp.LogKV( - "query", r.GetQuery(), - "step", time.UnixMilli(r.GetStep()), - "start", r.GetStart(), - "end", r.GetEnd(), - "key", key, - ) - - cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) } - maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) - maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) - if r.GetStart().UnixMilli() > maxCacheTime { - return s.next.Do(ctx, r) - } - - cached, ok := s.get(ctx, key) - if ok { - response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime) - } else { - response, extents, err = s.handleMiss(ctx, r, maxCacheTime) + return nil, err } - if err == nil && len(extents) > 0 { - extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents) - if err != nil { - return nil, err - } - s.put(ctx, key, extents) + queryRes, ok := res.(Response) + if !ok { + return nil, fmt.Errorf("could not cast cache response to query response") } - return response, err + return queryRes, nil } // shouldCacheResponse says whether the response should be cached or not. 
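The splitter deleted from this file (the `constSplitter` hunk above) now lives in `resultscache`; the scheme itself is unchanged: all requests whose start times fall into the same split window share one key, so their responses can be stitched together from cached extents. A sketch with illustrative parameter names:

```go
package main

import (
	"fmt"
	"time"
)

// cacheKey reproduces the constant-interval key scheme: userID, query,
// step, and the index of the split window the start time falls in.
func cacheKey(userID, query string, step int64, start time.Time, split time.Duration) string {
	currentInterval := start.UnixMilli() / split.Milliseconds()
	return fmt.Sprintf("%s:%s:%d:%d", userID, query, step, currentInterval)
}

func main() {
	start := time.Date(2023, 11, 20, 10, 30, 0, 0, time.UTC)
	// Two requests five minutes apart inside the same 24h window share a key.
	k1 := cacheKey("fake", `sum(rate({job="loki"}[5m]))`, 60_000, start, 24*time.Hour)
	k2 := cacheKey("fake", `sum(rate({job="loki"}[5m]))`, 60_000, start.Add(5*time.Minute), 24*time.Hour)
	fmt.Println(k1 == k2) // true
}
```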
@@ -379,303 +304,6 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri return } -func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) { - response, err := s.next.Do(ctx, r) - if err != nil { - return nil, nil, err - } - - if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) { - return response, []Extent{}, nil - } - - extent, err := toExtent(ctx, r, s.extractor.ResponseWithoutHeaders(response)) - if err != nil { - return nil, nil, err - } - - extents := []Extent{ - extent, - } - return response, extents, nil -} - -func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) { - var ( - reqResps []RequestResponse - err error - ) - sp, ctx := opentracing.StartSpanFromContext(ctx, "handleHit") - defer sp.Finish() - log := spanlogger.FromContext(ctx) - defer log.Finish() - - requests, responses, err := s.partition(r, extents) - if err != nil { - return nil, nil, err - } - if len(requests) == 0 { - response, err := s.merger.MergeResponse(responses...) - // No downstream requests so no need to write back to the cache. - return response, nil, err - } - - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r)) - - if err != nil { - return nil, nil, err - } - - for _, reqResp := range reqResps { - responses = append(responses, reqResp.Response) - if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) { - continue - } - extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response)) - if err != nil { - return nil, nil, err - } - extents = append(extents, extent) - } - sort.Slice(extents, func(i, j int) bool { - if extents[i].Start == extents[j].Start { - // as an optimization, for two extents starts at the same time, we - // put bigger extent at the front of the slice, which helps - // to reduce the amount of merge we have to do later. - return extents[i].End > extents[j].End - } - - return extents[i].Start < extents[j].Start - }) - - // Merge any extents - potentially overlapping - accumulator, err := newAccumulator(extents[0]) - if err != nil { - return nil, nil, err - } - mergedExtents := make([]Extent, 0, len(extents)) - - for i := 1; i < len(extents); i++ { - if accumulator.End+r.GetStep() < extents[i].Start { - mergedExtents, err = merge(mergedExtents, accumulator) - if err != nil { - return nil, nil, err - } - accumulator, err = newAccumulator(extents[i]) - if err != nil { - return nil, nil, err - } - continue - } - - if accumulator.End >= extents[i].End { - continue - } - - accumulator.TraceId = jaegerTraceID(ctx) - accumulator.End = extents[i].End - currentRes, err := extents[i].toResponse() - if err != nil { - return nil, nil, err - } - merged, err := s.merger.MergeResponse(accumulator.Response, currentRes) - if err != nil { - return nil, nil, err - } - accumulator.Response = merged - } - - mergedExtents, err = merge(mergedExtents, accumulator) - if err != nil { - return nil, nil, err - } - - response, err := s.merger.MergeResponse(responses...) 
- return response, mergedExtents, err -} - -type accumulator struct { - Response - Extent -} - -func merge(extents []Extent, acc *accumulator) ([]Extent, error) { - anyResp, err := types.MarshalAny(acc.Response) - if err != nil { - return nil, err - } - return append(extents, Extent{ - Start: acc.Extent.Start, - End: acc.Extent.End, - Response: anyResp, - TraceId: acc.Extent.TraceId, - }), nil -} - -func newAccumulator(base Extent) (*accumulator, error) { - res, err := base.toResponse() - if err != nil { - return nil, err - } - return &accumulator{ - Response: res, - Extent: base, - }, nil -} - -func toExtent(ctx context.Context, req Request, res Response) (Extent, error) { - anyResp, err := types.MarshalAny(res) - if err != nil { - return Extent{}, err - } - return Extent{ - Start: req.GetStart().UnixMilli(), - End: req.GetEnd().UnixMilli(), - Response: anyResp, - TraceId: jaegerTraceID(ctx), - }, nil -} - -// partition calculates the required requests to satisfy req given the cached data. -// extents must be in order by start time. -func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) { - var requests []Request - var cachedResponses []Response - start := req.GetStart().UnixMilli() - end := req.GetEnd().UnixMilli() - - for _, extent := range extents { - // If there is no overlap, ignore this extent. - if extent.GetEnd() < start || extent.Start > end { - continue - } - - // If this extent is tiny and request is not tiny, discard it: more efficient to do a few larger queries. - // Hopefully tiny request can make tiny extent into not-so-tiny extent. - - // However if the step is large enough, the split_query_by_interval middleware would generate a query with same start and end. - // For example, if the step size is more than 12h and the interval is 24h. - // This means the extent's start and end time would be same, even if the timerange covers several hours. - if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) { - continue - } - - // If there is a bit missing at the front, make a request for that. - if start < extent.Start { - r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(extent.Start)) - requests = append(requests, r) - } - res, err := extent.toResponse() - if err != nil { - return nil, nil, err - } - // extract the overlap from the cached extent. - cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd())) - start = extent.End - } - - // Lastly, make a request for any data missing at the end. - if start < req.GetEnd().UnixMilli() { - r := req.WithStartEnd(time.UnixMilli(start), time.UnixMilli(end)) - requests = append(requests, r) - } - - // If start and end are the same (valid in promql), start == req.GetEnd() and we won't do the query. - // But we should only do the request if we don't have a valid cached response for it. - if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 { - requests = append(requests, req) - } - - return requests, cachedResponses, nil -} - -func (s resultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) { - step := math.Max64(1, req.GetStep()) - maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / step) * step - for i := range extents { - // Never cache data for the latest freshness period. 
- if extents[i].End > maxCacheTime { - extents[i].End = maxCacheTime - res, err := extents[i].toResponse() - if err != nil { - return nil, err - } - extracted := s.extractor.Extract(extents[i].GetStart(), maxCacheTime, res, extents[i].GetStart(), extents[i].GetEnd()) - anyResp, err := types.MarshalAny(extracted) - if err != nil { - return nil, err - } - extents[i].Response = anyResp - } - } - return extents, nil -} - -func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) { - found, bufs, _, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)}) - if len(found) != 1 { - return nil, false - } - - var resp CachedResponse - sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck - defer sp.Finish() - log := spanlogger.FromContext(ctx) - defer log.Finish() - - log.LogFields(otlog.Int("bytes", len(bufs[0]))) - - if err := proto.Unmarshal(bufs[0], &resp); err != nil { - level.Error(log).Log("msg", "error unmarshalling cached value", "err", err) - log.Error(err) - return nil, false - } - - if resp.Key != key { - return nil, false - } - - // Refreshes the cache if it contains an old proto schema. - for _, e := range resp.Extents { - if e.Response == nil { - return nil, false - } - } - - return resp.Extents, true -} - -func (s resultsCache) put(ctx context.Context, key string, extents []Extent) { - buf, err := proto.Marshal(&CachedResponse{ - Key: key, - Extents: extents, - }) - if err != nil { - level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err) - return - } - - _ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf}) -} - -func jaegerTraceID(ctx context.Context) string { - span := opentracing.SpanFromContext(ctx) - if span == nil { - return "" - } - - spanContext, ok := span.Context().(jaeger.SpanContext) - if !ok { - return "" - } - - return spanContext.TraceID().String() -} - func extractMatrix(start, end int64, matrix []SampleStream) []SampleStream { result := make([]SampleStream, 0, len(matrix)) for _, stream := range matrix { @@ -702,20 +330,3 @@ func extractSampleStream(start, end int64, stream SampleStream) (SampleStream, b } return result, true } - -func (e *Extent) toResponse() (Response, error) { - msg, err := types.EmptyAny(e.Response) - if err != nil { - return nil, err - } - - if err := types.UnmarshalAny(e.Response, msg); err != nil { - return nil, err - } - - resp, ok := msg.(Response) - if !ok { - return nil, fmt.Errorf("bad cached type") - } - return resp, nil -} diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go b/pkg/querier/queryrange/queryrangebase/results_cache_test.go index 8020764d1f4a3..ff5e5be09a48f 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache_test.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go @@ -3,7 +3,6 @@ package queryrangebase import ( "context" "fmt" - "strconv" "testing" "time" @@ -18,6 +17,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util/constants" ) @@ -398,362 +398,13 @@ func TestShouldCache(t *testing.T) { } } -func TestPartition(t *testing.T) { - for _, tc := range []struct { - name string - input Request - prevCachedResponse []Extent - expectedRequests []Request - expectedCachedResponse []Response - }{ - { - name: "Test a complete hit.", - input: &PrometheusRequest{ - Start: time.UnixMilli(0), - 
End: time.UnixMilli(100), - }, - prevCachedResponse: []Extent{ - mkExtent(0, 100), - }, - expectedCachedResponse: []Response{ - mkAPIResponse(0, 100, 10), - }, - }, - - { - name: "Test with a complete miss.", - input: &PrometheusRequest{ - Start: time.UnixMilli(0), - End: time.UnixMilli(100), - }, - prevCachedResponse: []Extent{ - mkExtent(110, 210), - }, - expectedRequests: []Request{ - &PrometheusRequest{ - Start: time.UnixMilli(0), - End: time.UnixMilli(100), - }, - }, - }, - { - name: "Test a partial hit.", - input: &PrometheusRequest{ - Start: time.UnixMilli(0), - End: time.UnixMilli(100), - }, - prevCachedResponse: []Extent{ - mkExtent(50, 100), - }, - expectedRequests: []Request{ - &PrometheusRequest{ - Start: time.UnixMilli(0), - End: time.UnixMilli(50), - }, - }, - expectedCachedResponse: []Response{ - mkAPIResponse(50, 100, 10), - }, - }, - { - name: "Test multiple partial hits.", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(200), - }, - prevCachedResponse: []Extent{ - mkExtent(50, 120), - mkExtent(160, 250), - }, - expectedRequests: []Request{ - &PrometheusRequest{ - Start: time.UnixMilli(120), - End: time.UnixMilli(160), - }, - }, - expectedCachedResponse: []Response{ - mkAPIResponse(100, 120, 10), - mkAPIResponse(160, 200, 10), - }, - }, - { - name: "Partial hits with tiny gap.", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(160), - }, - prevCachedResponse: []Extent{ - mkExtent(50, 120), - mkExtent(122, 130), - }, - expectedRequests: []Request{ - &PrometheusRequest{ - Start: time.UnixMilli(120), - End: time.UnixMilli(160), - }, - }, - expectedCachedResponse: []Response{ - mkAPIResponse(100, 120, 10), - }, - }, - { - name: "Extent is outside the range and the request has a single step (same start and end).", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(100), - }, - prevCachedResponse: []Extent{ - mkExtent(50, 90), - }, - expectedRequests: []Request{ - &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(100), - }, - }, - }, - { - name: "Test when hit has a large step and only a single sample extent.", - // If there is a only a single sample in the split interval, start and end will be the same. 
- input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(100), - }, - prevCachedResponse: []Extent{ - mkExtent(100, 100), - }, - expectedCachedResponse: []Response{ - mkAPIResponse(100, 105, 10), - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - s := resultsCache{ - extractor: PrometheusResponseExtractor{}, - minCacheExtent: 10, - } - reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse) - require.Nil(t, err) - require.Equal(t, tc.expectedRequests, reqs) - require.Equal(t, tc.expectedCachedResponse, resps) - }) - } -} - -func TestHandleHit(t *testing.T) { - for _, tc := range []struct { - name string - input Request - cachedEntry []Extent - expectedUpdatedCachedEntry []Extent - }{ - { - name: "Should drop tiny extent that overlaps with non-tiny request only", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(120), - Step: 5, - }, - cachedEntry: []Extent{ - mkExtentWithStep(0, 50, 5), - mkExtentWithStep(60, 65, 5), - mkExtentWithStep(100, 105, 5), - mkExtentWithStep(110, 150, 5), - mkExtentWithStep(160, 165, 5), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(0, 50, 5), - mkExtentWithStep(60, 65, 5), - mkExtentWithStep(100, 150, 5), - mkExtentWithStep(160, 165, 5), - }, - }, - { - name: "Should replace tiny extents that are cover by bigger request", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(200), - Step: 5, - }, - cachedEntry: []Extent{ - mkExtentWithStep(0, 50, 5), - mkExtentWithStep(60, 65, 5), - mkExtentWithStep(100, 105, 5), - mkExtentWithStep(110, 115, 5), - mkExtentWithStep(120, 125, 5), - mkExtentWithStep(220, 225, 5), - mkExtentWithStep(240, 250, 5), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(0, 50, 5), - mkExtentWithStep(60, 65, 5), - mkExtentWithStep(100, 200, 5), - mkExtentWithStep(220, 225, 5), - mkExtentWithStep(240, 250, 5), - }, - }, - { - name: "Should not drop tiny extent that completely overlaps with tiny request", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(105), - Step: 5, - }, - cachedEntry: []Extent{ - mkExtentWithStep(0, 50, 5), - mkExtentWithStep(60, 65, 5), - mkExtentWithStep(100, 105, 5), - mkExtentWithStep(160, 165, 5), - }, - expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache - }, - { - name: "Should not drop tiny extent that partially center-overlaps with tiny request", - input: &PrometheusRequest{ - Start: time.UnixMilli(106), - End: time.UnixMilli(108), - Step: 2, - }, - cachedEntry: []Extent{ - mkExtentWithStep(60, 64, 2), - mkExtentWithStep(104, 110, 2), - mkExtentWithStep(160, 166, 2), - }, - expectedUpdatedCachedEntry: nil, // no cache update need, request fulfilled using cache - }, - { - name: "Should not drop tiny extent that partially left-overlaps with tiny request", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(106), - Step: 2, - }, - cachedEntry: []Extent{ - mkExtentWithStep(60, 64, 2), - mkExtentWithStep(104, 110, 2), - mkExtentWithStep(160, 166, 2), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(60, 64, 2), - mkExtentWithStep(100, 110, 2), - mkExtentWithStep(160, 166, 2), - }, - }, - { - name: "Should not drop tiny extent that partially right-overlaps with tiny request", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(106), - Step: 2, - }, - cachedEntry: []Extent{ - mkExtentWithStep(60, 64, 2), - mkExtentWithStep(98, 102, 2), - 
mkExtentWithStep(160, 166, 2), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(60, 64, 2), - mkExtentWithStep(98, 106, 2), - mkExtentWithStep(160, 166, 2), - }, - }, - { - name: "Should merge fragmented extents if request fills the hole", - input: &PrometheusRequest{ - Start: time.UnixMilli(40), - End: time.UnixMilli(80), - Step: 20, - }, - cachedEntry: []Extent{ - mkExtentWithStep(0, 20, 20), - mkExtentWithStep(80, 100, 20), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(0, 100, 20), - }, - }, - { - name: "Should left-extend extent if request starts earlier than extent in cache", - input: &PrometheusRequest{ - Start: time.UnixMilli(40), - End: time.UnixMilli(80), - Step: 20, - }, - cachedEntry: []Extent{ - mkExtentWithStep(60, 160, 20), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(40, 160, 20), - }, - }, - { - name: "Should right-extend extent if request ends later than extent in cache", - input: &PrometheusRequest{ - Start: time.UnixMilli(100), - End: time.UnixMilli(180), - Step: 20, - }, - cachedEntry: []Extent{ - mkExtentWithStep(60, 160, 20), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(60, 180, 20), - }, - }, - { - name: "Should not throw error if complete-overlapped smaller Extent is erroneous", - input: &PrometheusRequest{ - // This request is carefully crated such that cachedEntry is not used to fulfill - // the request. - Start: time.UnixMilli(160), - End: time.UnixMilli(180), - Step: 20, - }, - cachedEntry: []Extent{ - { - Start: 60, - End: 80, - - // if the optimization of "sorting by End when Start of 2 Extents are equal" is not there, this nil - // response would cause error during Extents merge phase. With the optimization - // this bad Extent should be dropped. The good Extent below can be used instead. 
- Response: nil, - }, - mkExtentWithStep(60, 160, 20), - }, - expectedUpdatedCachedEntry: []Extent{ - mkExtentWithStep(60, 180, 20), - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - sut := resultsCache{ - extractor: PrometheusResponseExtractor{}, - minCacheExtent: 10, - limits: mockLimits{}, - merger: PrometheusCodec, - parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 }, - next: HandlerFunc(func(_ context.Context, req Request) (Response, error) { - return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil - }), - } - - ctx := user.InjectOrgID(context.Background(), "1") - response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0) - require.NoError(t, err) - - expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep()) - require.Equal(t, expectedResponse, response, "response does not match the expectation") - require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation") - }) - } -} - func TestResultsCache(t *testing.T) { calls := 0 cfg := ResultsCacheConfig{ - CacheConfig: cache.Config{ - Cache: cache.NewMockCache(), + Config: resultscache.Config{ + CacheConfig: cache.Config{ + Cache: cache.NewMockCache(), + }, }, } c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) @@ -761,7 +412,7 @@ func TestResultsCache(t *testing.T) { rcm, err := NewResultsCacheMiddleware( log.NewNopLogger(), c, - constSplitter(day), + resultscache.ConstSplitter(day), mockLimits{}, PrometheusCodec, PrometheusResponseExtractor{}, @@ -807,7 +458,7 @@ func TestResultsCacheRecent(t *testing.T) { rcm, err := NewResultsCacheMiddleware( log.NewNopLogger(), c, - constSplitter(day), + resultscache.ConstSplitter(day), mockLimits{maxCacheFreshness: 10 * time.Minute}, PrometheusCodec, PrometheusResponseExtractor{}, @@ -844,122 +495,6 @@ func TestResultsCacheRecent(t *testing.T) { require.Equal(t, parsedResponse, resp) } -func TestResultsCacheMaxFreshness(t *testing.T) { - modelNow := model.Now() - for i, tc := range []struct { - fakeLimits Limits - Handler HandlerFunc - expectedResponse *PrometheusResponse - }{ - { - fakeLimits: mockLimits{maxCacheFreshness: 5 * time.Second}, - Handler: nil, - expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10), - }, - { - // should not lookup cache because per-tenant override will be applied - fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute}, - Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { - return parsedResponse, nil - }), - expectedResponse: parsedResponse, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - var cfg ResultsCacheConfig - flagext.DefaultValues(&cfg) - cfg.CacheConfig.Cache = cache.NewMockCache() - c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) - require.NoError(t, err) - fakeLimits := tc.fakeLimits - rcm, err := NewResultsCacheMiddleware( - log.NewNopLogger(), - c, - constSplitter(day), - fakeLimits, - PrometheusCodec, - PrometheusResponseExtractor{}, - nil, - nil, - func(_ context.Context, tenantIDs []string, r Request) int { - return tc.fakeLimits.MaxQueryParallelism(context.Background(), "fake") - }, - false, - nil, - ) - require.NoError(t, err) - - // create cache with handler - rc := rcm.Wrap(tc.Handler) - ctx := user.InjectOrgID(context.Background(), "1") - - // create request with 
start end within the key extents - req := parsedRequest.WithStartEnd(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3))) - - // fill cache - key := constSplitter(day).GenerateCacheKey(context.Background(), "1", req) - rc.(*resultsCache).put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))}) - - resp, err := rc.Do(ctx, req) - require.NoError(t, err) - require.Equal(t, tc.expectedResponse, resp) - }) - } -} - -func Test_resultsCache_MissingData(t *testing.T) { - cfg := ResultsCacheConfig{ - CacheConfig: cache.Config{ - Cache: cache.NewMockCache(), - }, - } - c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) - require.NoError(t, err) - rm, err := NewResultsCacheMiddleware( - log.NewNopLogger(), - c, - constSplitter(day), - mockLimits{}, - PrometheusCodec, - PrometheusResponseExtractor{}, - nil, - nil, - func(_ context.Context, tenantIDs []string, r Request) int { - return mockLimits{}.MaxQueryParallelism(context.Background(), "fake") - }, - false, - nil, - ) - require.NoError(t, err) - rc := rm.Wrap(nil).(*resultsCache) - ctx := context.Background() - - // fill up the cache - rc.put(ctx, "empty", []Extent{{ - Start: 100, - End: 200, - Response: nil, - }}) - rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)}) - rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), { - Start: 120, - End: 200, - Response: nil, - }}) - - extents, hit := rc.get(ctx, "empty") - require.Empty(t, extents) - require.False(t, hit) - - extents, hit = rc.get(ctx, "notempty") - require.Equal(t, len(extents), 1) - require.True(t, hit) - - extents, hit = rc.get(ctx, "mixed") - require.Equal(t, len(extents), 0) - require.False(t, hit) -} - func toMs(t time.Duration) int64 { return t.Nanoseconds() / (int64(time.Millisecond) / int64(time.Nanosecond)) } @@ -984,7 +519,7 @@ func TestConstSplitter_generateCacheKey(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) { - if got := constSplitter(tt.interval).GenerateCacheKey(context.Background(), "fake", tt.r); got != tt.want { + if got := resultscache.ConstSplitter(tt.interval).GenerateCacheKey(context.Background(), "fake", tt.r.(resultscache.Request)); got != tt.want { t.Errorf("generateKey() = %v, want %v", got, tt.want) } }) @@ -1033,7 +568,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) { rcm, err := NewResultsCacheMiddleware( log.NewNopLogger(), c, - constSplitter(day), + resultscache.ConstSplitter(day), mockLimits{maxCacheFreshness: 10 * time.Minute}, PrometheusCodec, PrometheusResponseExtractor{}, diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip.go b/pkg/querier/queryrange/queryrangebase/roundtrip.go index 847d311323c1e..1e0fe625f24d2 100644 --- a/pkg/querier/queryrange/queryrangebase/roundtrip.go +++ b/pkg/querier/queryrange/queryrangebase/roundtrip.go @@ -22,6 +22,8 @@ import ( "time" "github.com/pkg/errors" + + "github.com/grafana/dskit/flagext" ) const day = 24 * time.Hour @@ -33,11 +35,12 @@ var PassthroughMiddleware = MiddlewareFunc(func(next Handler) Handler { // Config for query_range middleware chain. 
type Config struct { - AlignQueriesWithStep bool `yaml:"align_queries_with_step"` - ResultsCacheConfig ResultsCacheConfig `yaml:"results_cache"` - CacheResults bool `yaml:"cache_results"` - MaxRetries int `yaml:"max_retries"` - ShardedQueries bool `yaml:"parallelise_shardable_queries"` + AlignQueriesWithStep bool `yaml:"align_queries_with_step"` + ResultsCacheConfig ResultsCacheConfig `yaml:"results_cache"` + CacheResults bool `yaml:"cache_results"` + MaxRetries int `yaml:"max_retries"` + ShardedQueries bool `yaml:"parallelise_shardable_queries"` + ShardAggregations flagext.StringSliceCSV `yaml:"shard_aggregations"` } // RegisterFlags adds the flags required to config this to the given FlagSet. @@ -47,6 +50,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.CacheResults, "querier.cache-results", false, "Cache query results.") f.BoolVar(&cfg.ShardedQueries, "querier.parallelise-shardable-queries", true, "Perform query parallelisations based on storage sharding configuration and query ASTs. This feature is supported only by the chunks storage engine.") + cfg.ShardAggregations = []string{} + f.Var(&cfg.ShardAggregations, "querier.shard-aggregations", + "A comma-separated list of LogQL vector and range aggregations that should be sharded") + cfg.ResultsCacheConfig.RegisterFlags(f) } @@ -57,6 +64,11 @@ func (cfg *Config) Validate() error { return errors.Wrap(err, "invalid results_cache config") } } + + if len(cfg.ShardAggregations) > 0 && !cfg.ShardedQueries { + return errors.New("shard_aggregations requires parallelise_shardable_queries=true") + } + return nil } diff --git a/pkg/querier/queryrange/queryrangebase/util.go b/pkg/querier/queryrange/queryrangebase/util.go index ee3aad8c15694..5073b715bc269 100644 --- a/pkg/querier/queryrange/queryrangebase/util.go +++ b/pkg/querier/queryrange/queryrangebase/util.go @@ -2,6 +2,8 @@ package queryrangebase import ( "context" + + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" ) // RequestResponse contains a request response and the respective request that was used. @@ -58,3 +60,23 @@ func DoRequests(ctx context.Context, downstream Handler, reqs []Request, paralle return resps, firstErr } + +type queryMergerAsCacheResponseMerger struct { + Merger +} + +func (m *queryMergerAsCacheResponseMerger) MergeResponse(responses ...resultscache.Response) (resultscache.Response, error) { + cacheResponses := make([]Response, 0, len(responses)) + for _, r := range responses { + cacheResponses = append(cacheResponses, r.(Response)) + } + response, err := m.Merger.MergeResponse(cacheResponses...) 
+ if err != nil { + return nil, err + } + return response.(resultscache.Response), nil +} + +func FromQueryResponseMergerToCacheResponseMerger(m Merger) resultscache.ResponseMerger { + return &queryMergerAsCacheResponseMerger{m} +} diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index 1df7bb4616fb9..a6c32b1525862 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -41,6 +41,7 @@ func NewQueryShardMiddleware( limits Limits, maxShards int, statsHandler queryrangebase.Handler, + shardAggregation []string, ) queryrangebase.Middleware { noshards := !hasShards(confs) @@ -54,7 +55,7 @@ func NewQueryShardMiddleware( } mapperware := queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { - return newASTMapperware(confs, engineOpts, next, statsHandler, logger, shardingMetrics, limits, maxShards) + return newASTMapperware(confs, engineOpts, next, statsHandler, logger, shardingMetrics, limits, maxShards, shardAggregation) }) return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { @@ -79,16 +80,18 @@ func newASTMapperware( metrics *logql.MapperMetrics, limits Limits, maxShards int, + shardAggregation []string, ) *astMapperware { ast := &astMapperware{ - confs: confs, - logger: log.With(logger, "middleware", "QueryShard.astMapperware"), - limits: limits, - next: next, - statsHandler: next, - ng: logql.NewDownstreamEngine(engineOpts, DownstreamHandler{next: next, limits: limits}, limits, logger), - metrics: metrics, - maxShards: maxShards, + confs: confs, + logger: log.With(logger, "middleware", "QueryShard.astMapperware"), + limits: limits, + next: next, + statsHandler: next, + ng: logql.NewDownstreamEngine(engineOpts, DownstreamHandler{next: next, limits: limits}, limits, logger), + metrics: metrics, + maxShards: maxShards, + shardAggregation: shardAggregation, } if statsHandler != nil { @@ -107,6 +110,10 @@ type astMapperware struct { ng *logql.DownstreamEngine metrics *logql.MapperMetrics maxShards int + + // Feature flag for sharding range and vector aggregations such as + // quantile_over_time with probabilistic data structures. 
+ shardAggregation []string } func (ast *astMapperware) checkQuerySizeLimit(ctx context.Context, bytesPerShard uint64, notShardable bool) error { @@ -143,7 +150,12 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que util_log.WithContext(ctx, ast.logger), ) - maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(r.GetQuery()) + params, err := ParamsFromRequest(r) + if err != nil { + return nil, err + } + + maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(params.GetExpression()) if err != nil { level.Warn(logger).Log("err", err.Error(), "msg", "failed to get range-vector and offset duration so skipped AST mapper for request") return ast.next.Do(ctx, r) @@ -183,12 +195,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que return ast.next.Do(ctx, r) } - mapper := logql.NewShardMapper(resolver, ast.metrics) - - params, err := ParamsFromRequest(r) - if err != nil { - return nil, err - } + mapper := logql.NewShardMapper(resolver, ast.metrics, ast.shardAggregation) noop, bytesPerShard, parsed, err := mapper.Parse(params.GetExpression()) if err != nil { diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index d3d3ce807ac64..95d809776b406 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -172,6 +172,7 @@ func Test_astMapper(t *testing.T) { nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second}, 0, + []string{}, ) req := defaultReq() @@ -316,6 +317,7 @@ func Test_astMapper_QuerySizeLimits(t *testing.T) { maxQuerierBytesRead: tc.maxQuerierBytesSize, }, 0, + []string{}, ) req := defaultReq() @@ -354,6 +356,7 @@ func Test_ShardingByPass(t *testing.T) { nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1}, 0, + []string{}, ) req := defaultReq() @@ -434,7 +437,9 @@ func Test_InstantSharding(t *testing.T) { queryTimeout: time.Second, }, 0, - nil) + nil, + []string{}, + ) response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { lock.Lock() defer lock.Unlock() @@ -585,7 +590,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }{ { name: "logs query touching just the active schema config", - req: defaultReq().WithStartEndTime(now.Add(-time.Hour).Time(), now.Time()).WithQuery(`{foo="bar"}`), + req: defaultReq().WithStartEnd(now.Add(-time.Hour).Time(), now.Time()).WithQuery(`{foo="bar"}`), resp: &LokiResponse{ Status: loghttp.QueryStatusSuccess, Headers: []definitions.PrometheusResponseHeader{ @@ -596,7 +601,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "logs query touching just the prev schema config", - req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), confs[0].From.Time.Add(time.Hour).Time()).WithQuery(`{foo="bar"}`), + req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), confs[0].From.Time.Add(time.Hour).Time()).WithQuery(`{foo="bar"}`), resp: &LokiResponse{ Status: loghttp.QueryStatusSuccess, Headers: []definitions.PrometheusResponseHeader{ @@ -607,7 +612,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "metric query touching just the active schema config", - req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`), + req: 
defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`), resp: &LokiPromResponse{ Response: &queryrangebase.PrometheusResponse{ Status: loghttp.QueryStatusSuccess, @@ -624,7 +629,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "metric query touching just the prev schema config", - req: defaultReq().WithStartEndTime(confs[0].From.Time.Add(time.Hour).Time(), confs[0].From.Time.Add(2*time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`), + req: defaultReq().WithStartEnd(confs[0].From.Time.Add(time.Hour).Time(), confs[0].From.Time.Add(2*time.Hour).Time()).WithQuery(`rate({foo="bar"}[1m])`), resp: &LokiPromResponse{ Response: &queryrangebase.PrometheusResponse{ Status: loghttp.QueryStatusSuccess, @@ -641,7 +646,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "logs query covering both schemas", - req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), now.Time()).WithQuery(`{foo="bar"}`), + req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), now.Time()).WithQuery(`{foo="bar"}`), resp: &LokiResponse{ Status: loghttp.QueryStatusSuccess, Headers: []definitions.PrometheusResponseHeader{ @@ -652,7 +657,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "metric query covering both schemas", - req: defaultReq().WithStartEndTime(confs[0].From.Time.Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m])`), + req: defaultReq().WithStartEnd(confs[0].From.Time.Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m])`), resp: &LokiPromResponse{ Response: &queryrangebase.PrometheusResponse{ Status: loghttp.QueryStatusSuccess, @@ -669,7 +674,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "metric query with start/end within first schema but with large enough range to cover previous schema too", - req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[24h])`), + req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), confs[1].From.Time.Add(time.Hour).Time()).WithQuery(`rate({foo="bar"}[24h])`), resp: &LokiPromResponse{ Response: &queryrangebase.PrometheusResponse{ Status: loghttp.QueryStatusSuccess, @@ -686,7 +691,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { }, { name: "metric query with start/end within first schema but with large enough offset to shift it to previous schema", - req: defaultReq().WithStartEndTime(confs[1].From.Time.Add(5*time.Minute).Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m] offset 12h)`), + req: defaultReq().WithStartEnd(confs[1].From.Time.Add(5*time.Minute).Time(), now.Time()).WithQuery(`rate({foo="bar"}[1m] offset 12h)`), resp: &LokiPromResponse{ Response: &queryrangebase.PrometheusResponse{ Status: loghttp.QueryStatusSuccess, @@ -722,6 +727,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second}, 0, + []string{}, ) // currently all the tests call `defaultReq()` which creates an instance of the type LokiRequest @@ -856,6 +862,7 @@ func Test_ASTMapper_MaxLookBackPeriod(t *testing.T) { nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, tsdbMaxQueryParallelism: 1, queryTimeout: time.Second}, 0, + []string{}, ) q := `{cluster="dev-us-central-0"}` diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 
2b24ab4a917dc..c03d459ba9b23 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -7,11 +7,10 @@ import ( "strings" "time" - "github.com/grafana/dskit/user" - "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/user" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -243,11 +242,6 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, switch op := req.(type) { case *LokiRequest: - expr, err := syntax.ParseExpr(op.Query) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - queryHash := util.HashedQuery(op.Query) level.Info(logger).Log( "msg", "executing query", @@ -262,7 +256,11 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, "query_hash", queryHash, ) - switch e := expr.(type) { + if op.Plan == nil { + return nil, errors.New("query plan is empty") + } + + switch e := op.Plan.AST.(type) { case syntax.SampleExpr: // The error will be handled later. groups, err := e.MatcherGroups() @@ -303,15 +301,10 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, return r.labels.Do(ctx, req) case *LokiInstantRequest: - expr, err := syntax.ParseExpr(op.Query) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - queryHash := util.HashedQuery(op.Query) level.Info(logger).Log("msg", "executing query", "type", "instant", "query", op.Query, "query_hash", queryHash) - switch expr.(type) { + switch op.Plan.AST.(type) { case syntax.SampleExpr: return r.instantMetric.Do(ctx, req) default: @@ -441,6 +434,7 @@ func NewLogFilterTripperware( limits, 0, // 0 is unlimited shards statsHandler, + cfg.ShardAggregations, ), ) } else { @@ -458,10 +452,7 @@ func NewLogFilterTripperware( ) } - if len(queryRangeMiddleware) > 0 { - return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) - } - return next + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) }), nil } @@ -541,10 +532,7 @@ func NewSeriesTripperware( } return base.MiddlewareFunc(func(next base.Handler) base.Handler { - if len(queryRangeMiddleware) > 0 { - return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) - } - return next + return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) }), nil } @@ -575,11 +563,8 @@ func NewLabelsTripperware( } return base.MiddlewareFunc(func(next base.Handler) base.Handler { - if len(queryRangeMiddleware) > 0 { - // Do not forward any request header. - return base.MergeMiddlewares(queryRangeMiddleware...).Wrap(next) - } - return next + // Do not forward any request header. 
+ return base.MergeMiddlewares(queryRangeMiddleware...).Wrap(next) }), nil } @@ -674,6 +659,7 @@ func NewMetricTripperware( limits, 0, // 0 is unlimited shards statsHandler, + cfg.ShardAggregations, ), ) } else { @@ -738,6 +724,7 @@ func NewInstantMetricTripperware( limits, 0, // 0 is unlimited shards statsHandler, + cfg.ShardAggregations, ), ) } diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index b91e088041db9..f6379ce728d08 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -29,6 +29,7 @@ import ( "github.com/grafana/loki/pkg/querier/plan" base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/index/seriesvolume" "github.com/grafana/loki/pkg/util" @@ -46,11 +47,13 @@ var ( MaxRetries: 3, CacheResults: true, ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, - MaxSizeMB: 1024, - TTL: 24 * time.Hour, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1024, + TTL: 24 * time.Hour, + }, }, }, }, @@ -59,22 +62,26 @@ var ( CacheIndexStatsResults: true, StatsCacheConfig: IndexStatsCacheConfig{ ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, - MaxSizeMB: 1024, - TTL: 24 * time.Hour, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1024, + TTL: 24 * time.Hour, + }, }, }, }, }, VolumeCacheConfig: VolumeCacheConfig{ ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, - MaxSizeMB: 1024, - TTL: 24 * time.Hour, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1024, + TTL: 24 * time.Hour, + }, }, }, }, @@ -192,6 +199,9 @@ func TestMetricsTripperware(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`rate({app="foo"} |= "foo"[1m])`), + }, } ctx := user.InjectOrgID(context.Background(), "1") @@ -275,6 +285,9 @@ func TestLogFilterTripperware(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"} |= "foo"`), + }, } ctx := user.InjectOrgID(context.Background(), "1") @@ -665,10 +678,12 @@ func TestNewTripperware_Caches(t *testing.T) { Config: base.Config{ CacheResults: true, ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - MaxSizeMB: 1, - Enabled: true, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: cache.EmbeddedCacheConfig{ + MaxSizeMB: 1, + Enabled: true, + }, }, }, }, @@ -684,10 +699,12 @@ func TestNewTripperware_Caches(t *testing.T) { Config: base.Config{ CacheResults: true, ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - MaxSizeMB: 1, - Enabled: true, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: 
cache.EmbeddedCacheConfig{ + MaxSizeMB: 1, + Enabled: true, + }, }, }, }, @@ -703,10 +720,12 @@ func TestNewTripperware_Caches(t *testing.T) { Config: base.Config{ CacheResults: true, ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, - MaxSizeMB: 2000, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 2000, + }, }, }, }, @@ -714,10 +733,12 @@ func TestNewTripperware_Caches(t *testing.T) { CacheIndexStatsResults: true, StatsCacheConfig: IndexStatsCacheConfig{ ResultsCacheConfig: base.ResultsCacheConfig{ - CacheConfig: cache.Config{ - EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, - MaxSizeMB: 1000, + Config: resultscache.Config{ + CacheConfig: cache.Config{ + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1000, + }, }, }, }, @@ -791,6 +812,9 @@ func TestLogNoFilter(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"}`), + }, } ctx := user.InjectOrgID(context.Background(), "1") @@ -802,7 +826,12 @@ func TestLogNoFilter(t *testing.T) { } func TestPostQueries(t *testing.T) { - lreq := &LokiRequest{Query: `{app="foo"} |~ "foo"`} + lreq := &LokiRequest{ + Query: `{app="foo"} |~ "foo"`, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"} |~ "foo"`), + }, + } ctx := user.InjectOrgID(context.Background(), "1") handler := base.HandlerFunc(func(context.Context, base.Request) (base.Response, error) { t.Error("unexpected default roundtripper called") @@ -840,6 +869,9 @@ func TestTripperware_EntriesLimit(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"}`), + }, } ctx := user.InjectOrgID(context.Background(), "1") @@ -887,6 +919,9 @@ func TestTripperware_RequiredLabels(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(test.qs), + }, } // See loghttp.step step := time.Duration(int(math.Max(math.Floor(lreq.EndTs.Sub(lreq.StartTs).Seconds()/250), 1))) * time.Second @@ -992,6 +1027,9 @@ func TestTripperware_RequiredNumberLabels(t *testing.T) { EndTs: testTime, Direction: logproto.FORWARD, Path: "/loki/api/v1/query_range", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.query), + }, } // See loghttp.step step := time.Duration(int(math.Max(math.Floor(lreq.EndTs.Sub(lreq.StartTs).Seconds()/250), 1))) * time.Second diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index 84b3a519f1269..d568fe65ddde8 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -311,13 +311,17 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran return reqs, nil } -// maxRangeVectorAndOffsetDuration returns the maximum range vector and offset duration within a LogQL query. 
-func maxRangeVectorAndOffsetDuration(q string) (time.Duration, time.Duration, error) { - expr, err := syntax.ParseExpr(q) +// maxRangeVectorAndOffsetDurationFromQueryString parses the given query string and returns the maximum range vector and offset duration within it. +func maxRangeVectorAndOffsetDurationFromQueryString(q string) (time.Duration, time.Duration, error) { + parsed, err := syntax.ParseExpr(q) if err != nil { return 0, 0, err } + return maxRangeVectorAndOffsetDuration(parsed) +} +// maxRangeVectorAndOffsetDuration returns the maximum range vector and offset duration within a LogQL query. +func maxRangeVectorAndOffsetDuration(expr syntax.Expr) (time.Duration, time.Duration, error) { if _, ok := expr.(syntax.SampleExpr); !ok { return 0, 0, nil } @@ -338,8 +342,8 @@ func maxRangeVectorAndOffsetDuration(q string) (time.Duration, time.Duration, er // reduceSplitIntervalForRangeVector reduces the split interval for a range query based on the duration of the range vector. // Large range vector durations will not be split into smaller intervals because it can cause the queries to be slow by over-processing data. -func reduceSplitIntervalForRangeVector(r queryrangebase.Request, interval time.Duration) (time.Duration, error) { - maxRange, _, err := maxRangeVectorAndOffsetDuration(r.GetQuery()) +func reduceSplitIntervalForRangeVector(r *LokiRequest, interval time.Duration) (time.Duration, error) { + maxRange, _, err := maxRangeVectorAndOffsetDuration(r.Plan.AST) if err != nil { return 0, err } @@ -352,13 +356,13 @@ func reduceSplitIntervalForRangeVector(r queryrangebase.Request, interval time.D func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) { var reqs []queryrangebase.Request - interval, err := reduceSplitIntervalForRangeVector(r, interval) + lokiReq := r.(*LokiRequest) + + interval, err := reduceSplitIntervalForRangeVector(lokiReq, interval) if err != nil { return nil, err } - lokiReq := r.(*LokiRequest) - // step align start and end time of the query. Start time is rounded down and end time is rounded up. stepNs := r.GetStep() * 1e6 startNs := lokiReq.StartTs.UnixNano() diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go index 78d74b111a12a..2730cf49d918c 100644 --- a/pkg/querier/queryrange/split_by_interval_test.go +++ b/pkg/querier/queryrange/split_by_interval_test.go @@ -228,7 +228,7 @@ func Test_splitMetricQuery(t *testing.T) { const seconds = 1e3 // 1e3 milliseconds per second. 
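As the `reduceSplitIntervalForRangeVector` comment above explains, large range vectors suppress splitting so that sub-queries do not repeatedly over-process the same data; the function now reads the pre-parsed `Plan.AST` instead of re-parsing the query string. A minimal sketch of the interval-reduction idea, where the clamp rule is an illustrative assumption rather than Loki's exact logic:

```go
package main

import (
	"fmt"
	"time"
)

// reduceInterval keeps a split interval from dropping below the query's
// largest range vector; otherwise every split would re-read much of the
// same window of data. Illustrative only, not Loki's precise rule.
func reduceInterval(maxRange, interval time.Duration) time.Duration {
	if maxRange > interval {
		return maxRange
	}
	return interval
}

func main() {
	// rate({foo="bar"}[24h]) with a 1h split interval: splitting hourly would
	// process overlapping 24h windows in every sub-query, so the interval grows.
	fmt.Println(reduceInterval(24*time.Hour, time.Hour)) // 24h0m0s
}
```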
for i, tc := range []struct { - input queryrangebase.Request + input *LokiRequest expected []queryrangebase.Request interval time.Duration }{ @@ -600,6 +600,17 @@ func Test_splitMetricQuery(t *testing.T) { interval: 15 * time.Minute, }, } { + // Set query plans + tc.input.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(tc.input.Query), + } + + for _, e := range tc.expected { + e.(*LokiRequest).Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(e.GetQuery()), + } + } + t.Run(strconv.Itoa(i), func(t *testing.T) { splits, err := splitMetricByTime(tc.input, tc.interval) require.NoError(t, err) diff --git a/pkg/querier/queryrange/volume_cache.go b/pkg/querier/queryrange/volume_cache.go index 0c54745654004..954c642ffef8b 100644 --- a/pkg/querier/queryrange/volume_cache.go +++ b/pkg/querier/queryrange/volume_cache.go @@ -15,6 +15,7 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/validation" ) @@ -24,7 +25,7 @@ type VolumeSplitter struct { } // GenerateCacheKey generates a cache key based on the userID, Request and interval. -func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r queryrangebase.Request) string { +func (i VolumeSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string { cacheKey := i.cacheKeyLimits.GenerateCacheKey(ctx, userID, r) volumeReq := r.(*logproto.VolumeRequest) @@ -38,7 +39,7 @@ type VolumeExtractor struct{} // Extract favors the ability to cache over exactness of results. It assumes a constant distribution // of log volumes over a range and will extract subsets proportionally. 
-func (p VolumeExtractor) Extract(start, end int64, res queryrangebase.Response, resStart, resEnd int64) queryrangebase.Response { +func (p VolumeExtractor) Extract(start, end int64, res resultscache.Response, resStart, resEnd int64) resultscache.Response { factor := util.GetFactorOfTime(start, end, resStart, resEnd) volumeRes := res.(*VolumeResponse) @@ -101,7 +102,7 @@ func NewVolumeCacheMiddleware( c cache.Cache, cacheGenNumberLoader queryrangebase.CacheGenNumberLoader, shouldCache queryrangebase.ShouldCacheFn, - parallelismForReq func(ctx context.Context, tenantIDs []string, r queryrangebase.Request) int, + parallelismForReq queryrangebase.ParallelismForReqFn, retentionEnabled bool, transformer UserIDTransformer, metrics *queryrangebase.ResultsCacheMetrics, diff --git a/pkg/querier/queryrange/volume_cache_test.go b/pkg/querier/queryrange/volume_cache_test.go index ebe9ef8094b83..904e0fc7c3a99 100644 --- a/pkg/querier/queryrange/volume_cache_test.go +++ b/pkg/querier/queryrange/volume_cache_test.go @@ -10,6 +10,8 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" + "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" @@ -22,8 +24,10 @@ import ( func TestVolumeCache(t *testing.T) { setup := func(volResp *VolumeResponse) (*int, queryrangebase.Handler) { cfg := queryrangebase.ResultsCacheConfig{ - CacheConfig: cache.Config{ - Cache: cache.NewMockCache(), + Config: resultscache.Config{ + CacheConfig: cache.Config{ + Cache: cache.NewMockCache(), + }, }, } c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) @@ -281,8 +285,10 @@ func TestVolumeCache_RecentData(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { cfg := queryrangebase.ResultsCacheConfig{ - CacheConfig: cache.Config{ - Cache: cache.NewMockCache(), + Config: resultscache.Config{ + CacheConfig: cache.Config{ + Cache: cache.NewMockCache(), + }, }, } c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) diff --git a/pkg/querier/worker/frontend_processor.go b/pkg/querier/worker/frontend_processor.go index 3e77c3f0e91a8..45c61862d0598 100644 --- a/pkg/querier/worker/frontend_processor.go +++ b/pkg/querier/worker/frontend_processor.go @@ -58,7 +58,7 @@ func (fp *frontendProcessor) notifyShutdown(ctx context.Context, conn *grpc.Clie } // runOne loops, trying to establish a stream to the frontend to begin request processing. 
-func (fp *frontendProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) { +func (fp *frontendProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address, _ string) { client := frontendv1pb.NewFrontendClient(conn) backoff := backoff.New(ctx, processorBackoffConfig) diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go index e446500dd804b..cecdb7bfe27d3 100644 --- a/pkg/querier/worker/frontend_processor_test.go +++ b/pkg/querier/worker/frontend_processor_test.go @@ -39,7 +39,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) { running.Store(true) defer running.Store(false) - mgr.processQueriesOnSingleStream(ctx, cc, "test:12345") + mgr.processQueriesOnSingleStream(ctx, cc, "test:12345", "") }() test.Poll(t, time.Second, true, func() interface{} { diff --git a/pkg/querier/worker/processor_manager.go b/pkg/querier/worker/processor_manager.go index 5d675c88a6576..3a2c8c338865d 100644 --- a/pkg/querier/worker/processor_manager.go +++ b/pkg/querier/worker/processor_manager.go @@ -2,6 +2,7 @@ package worker import ( "context" + "strconv" "sync" "time" @@ -64,7 +65,9 @@ func (pm *processorManager) concurrency(n int) { n = 0 } + workerID := 0 for len(pm.cancels) < n { + workerID++ ctx, cancel := context.WithCancel(pm.ctx) pm.cancels = append(pm.cancels, cancel) @@ -75,7 +78,7 @@ func (pm *processorManager) concurrency(n int) { pm.currentProcessors.Inc() defer pm.currentProcessors.Dec() - pm.p.processQueriesOnSingleStream(ctx, pm.conn, pm.address) + pm.p.processQueriesOnSingleStream(ctx, pm.conn, pm.address, strconv.Itoa(workerID)) }() } diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index 15e3985b60fbd..16d0e59d1ed14 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -83,7 +83,7 @@ func (sp *schedulerProcessor) notifyShutdown(ctx context.Context, conn *grpc.Cli } } -func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Context, conn *grpc.ClientConn, address string) { +func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Context, conn *grpc.ClientConn, address, workerID string) { schedulerClient := sp.schedulerClientFactory(conn) // Run the querier loop (and so all the queries) in a dedicated context that we call the "execution context". @@ -104,7 +104,7 @@ func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Con continue } - if err := sp.querierLoop(c, address, inflightQuery); err != nil { + if err := sp.querierLoop(c, address, inflightQuery, workerID); err != nil { // Do not log an error if the query-scheduler is shutting down. if s, ok := status.FromError(err); !ok || !strings.Contains(s.Message(), schedulerpb.ErrSchedulerIsNotRunning.Error()) { level.Error(sp.log).Log("msg", "error processing requests from scheduler", "err", err, "addr", address) @@ -119,17 +119,20 @@ func (sp *schedulerProcessor) processQueriesOnSingleStream(workerCtx context.Con } // process loops processing requests on an established stream. 
-func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_QuerierLoopClient, address string, inflightQuery *atomic.Bool) error { +func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_QuerierLoopClient, address string, inflightQuery *atomic.Bool, workerID string) error { // Build a child context so we can cancel a query when the stream is closed. ctx, cancel := context.WithCancel(c.Context()) defer cancel() for { + start := time.Now() request, err := c.Recv() if err != nil { return err } + level.Debug(sp.log).Log("msg", "received query", "worker", workerID, "wait_time_sec", time.Since(start).Seconds()) + inflightQuery.Store(true) // Handle the request on a "background" goroutine, so we go back to diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go index b1971bdd76077..154ba1ae4fa73 100644 --- a/pkg/querier/worker/scheduler_processor_test.go +++ b/pkg/querier/worker/scheduler_processor_test.go @@ -41,7 +41,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) { requestHandler.On("Do", mock.Anything, mock.Anything).Return(&queryrange.LokiResponse{}, nil) - sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1") + sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1", "1") // We expect at this point, the execution context has been canceled too. require.Error(t, loopClient.Context().Err()) @@ -91,7 +91,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) { }).Return(&queryrange.LokiResponse{}, nil) startTime := time.Now() - sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1") + sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1", "1") assert.GreaterOrEqual(t, time.Since(startTime), time.Second) // We expect at this point, the execution context has been canceled too. @@ -122,7 +122,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) { requestHandler.On("Do", mock.Anything, mock.Anything).Return(&queryrange.LokiResponse{}, nil) - sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1") + sp.processQueriesOnSingleStream(workerCtx, nil, "127.0.0.1", "1") // We expect no error in the log. assert.NotContains(t, logs.String(), "error") diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index a7bebfbfccf14..b2e50b205d143 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -70,7 +70,7 @@ type processor interface { // This method must react on context being finished, and stop when that happens. // // processorManager (not processor) is responsible for starting as many goroutines as needed for each connection. - processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) + processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address, workerID string) // notifyShutdown notifies the remote query-frontend or query-scheduler that the querier is // shutting down. 
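The `querierLoop` change above threads a worker ID through the processor and replaces the removed `querier_wait_seconds` histogram with a per-worker debug log of the time spent blocked in `Recv`. A minimal sketch of that measurement, with a hypothetical `stream` type standing in for the scheduler loop client:

```go
package main

import (
	"fmt"
	"time"
)

// stream is a hypothetical stand-in for the scheduler's QuerierLoop client.
type stream interface{ Recv() (string, error) }

type fakeStream struct{}

func (fakeStream) Recv() (string, error) {
	time.Sleep(10 * time.Millisecond) // simulate waiting on the scheduler
	return "request", nil
}

// loopOnce measures how long the worker waited for a request and logs it
// per worker, mirroring the debug line added to querierLoop above.
func loopOnce(c stream, workerID string) error {
	start := time.Now()
	req, err := c.Recv()
	if err != nil {
		return err
	}
	fmt.Printf("msg=%q worker=%s wait_time_sec=%.3f req=%s\n",
		"received query", workerID, time.Since(start).Seconds(), req)
	return nil
}

func main() { _ = loopOnce(fakeStream{}, "1") }
```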
diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go index 2f1ccb98d3097..68791b214f178 100644 --- a/pkg/querier/worker/worker_test.go +++ b/pkg/querier/worker/worker_test.go @@ -88,7 +88,7 @@ func getConcurrentProcessors(w *querierWorker) int { type mockProcessor struct{} -func (m mockProcessor) processQueriesOnSingleStream(ctx context.Context, _ *grpc.ClientConn, _ string) { +func (m mockProcessor) processQueriesOnSingleStream(ctx context.Context, _ *grpc.ClientConn, _, _ string) { <-ctx.Done() } diff --git a/pkg/querier/worker_service.go b/pkg/querier/worker_service.go index d0837e4180652..5dba31f3eebc4 100644 --- a/pkg/querier/worker_service.go +++ b/pkg/querier/worker_service.go @@ -3,6 +3,7 @@ package querier import ( "fmt" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" @@ -52,6 +53,7 @@ func (cfg WorkerServiceConfig) QuerierRunningStandalone() bool { // HTTP router for the Prometheus API routes. Then the external HTTP server will be passed // as a http.Handler to the frontend worker. func InitWorkerService( + logger log.Logger, cfg WorkerServiceConfig, reg prometheus.Registerer, handler queryrangebase.Handler, @@ -76,7 +78,7 @@ func InitWorkerService( *(cfg.QuerierWorkerConfig), cfg.SchedulerRing, handler, - util_log.Logger, + logger, reg, codec, ) @@ -102,7 +104,7 @@ func InitWorkerService( *(cfg.QuerierWorkerConfig), cfg.SchedulerRing, handler, - util_log.Logger, + logger, reg, codec, ) diff --git a/pkg/queue/dequeue_qos_test.go b/pkg/queue/dequeue_qos_test.go index 6b1de885943a3..c889cbe8f4c60 100644 --- a/pkg/queue/dequeue_qos_test.go +++ b/pkg/queue/dequeue_qos_test.go @@ -44,7 +44,7 @@ func enqueueRequestsForActor(t testing.TB, actor []string, useActor bool, queue if !useActor { actor = nil } - err := queue.Enqueue("tenant", actor, r, 0, nil) + err := queue.Enqueue("tenant", actor, r, nil) if err != nil { t.Fatal(err) } @@ -58,7 +58,7 @@ func BenchmarkQueryFairness(t *testing.B) { for _, useActor := range []bool{false, true} { t.Run(fmt.Sprintf("use hierarchical queues = %v", useActor), func(t *testing.B) { - requestQueue := NewRequestQueue(1024, 0, NewMetrics(nil, constants.Loki, "query_scheduler")) + requestQueue := NewRequestQueue(1024, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler")) enqueueRequestsForActor(t, []string{}, useActor, requestQueue, numSubRequestsActorA, 50*time.Millisecond) enqueueRequestsForActor(t, []string{"a"}, useActor, requestQueue, numSubRequestsActorA, 100*time.Millisecond) enqueueRequestsForActor(t, []string{"b"}, useActor, requestQueue, numSubRequestsActorB, 50*time.Millisecond) @@ -133,18 +133,18 @@ func TestQueryFairnessAcrossSameLevel(t *testing.T) { 456: [210] **/ - requestQueue := NewRequestQueue(1024, 0, NewMetrics(nil, constants.Loki, "query_scheduler")) - _ = requestQueue.Enqueue("tenant1", []string{}, r(0), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{}, r(1), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{}, r(2), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(10), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(11), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(12), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(20), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(21), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(22), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"xyz", "123"}, 
r(200), 0, nil) - _ = requestQueue.Enqueue("tenant1", []string{"xyz", "456"}, r(210), 0, nil) + requestQueue := NewRequestQueue(1024, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler")) + _ = requestQueue.Enqueue("tenant1", []string{}, r(0), nil) + _ = requestQueue.Enqueue("tenant1", []string{}, r(1), nil) + _ = requestQueue.Enqueue("tenant1", []string{}, r(2), nil) + _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(10), nil) + _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(11), nil) + _ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(12), nil) + _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(20), nil) + _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(21), nil) + _ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(22), nil) + _ = requestQueue.Enqueue("tenant1", []string{"xyz", "123"}, r(200), nil) + _ = requestQueue.Enqueue("tenant1", []string{"xyz", "456"}, r(210), nil) requestQueue.queues.recomputeUserConsumers() items := make([]int, 0) diff --git a/pkg/queue/metrics.go b/pkg/queue/metrics.go index 5d00edb1a3b16..769fb51c23708 100644 --- a/pkg/queue/metrics.go +++ b/pkg/queue/metrics.go @@ -6,10 +6,9 @@ import ( ) type Metrics struct { - queueLength *prometheus.GaugeVec // Per tenant - discardedRequests *prometheus.CounterVec // Per tenant - enqueueCount *prometheus.CounterVec // Per tenant and level - querierWaitTime *prometheus.HistogramVec // Per querier wait time + queueLength *prometheus.GaugeVec // Per tenant + discardedRequests *prometheus.CounterVec // Per tenant + enqueueCount *prometheus.CounterVec // Per tenant and level } func NewMetrics(registerer prometheus.Registerer, metricsNamespace, subsystem string) *Metrics { @@ -32,13 +31,6 @@ func NewMetrics(registerer prometheus.Registerer, metricsNamespace, subsystem st Name: "enqueue_count", Help: "Total number of enqueued (sub-)queries.", }, []string{"user", "level"}), - querierWaitTime: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: subsystem, - Name: "querier_wait_seconds", - Help: "Time spend waiting for new requests.", - Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 30, 60, 120, 240}, - }, []string{"querier"}), } } diff --git a/pkg/queue/queue.go b/pkg/queue/queue.go index f0475164bd4d1..aab4631e86e4f 100644 --- a/pkg/queue/queue.go +++ b/pkg/queue/queue.go @@ -39,6 +39,11 @@ func (ui QueueIndex) ReuseLastIndex() QueueIndex { return ui - 1 } +type Limits interface { + // MaxConsumers returns the max consumers to use per tenant or 0 to allow all consumers to consume from the queue. + MaxConsumers(user string, allConsumers int) int +} + // Request stored into the queue. type Request any @@ -62,9 +67,9 @@ type RequestQueue struct { pool *SlicePool[Request] } -func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, metrics *Metrics) *RequestQueue { +func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, limits Limits, metrics *Metrics) *RequestQueue { q := &RequestQueue{ - queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay), + queues: newTenantQueues(maxOutstandingPerTenant, forgetDelay, limits), connectedConsumers: atomic.NewInt32(0), metrics: metrics, pool: NewSlicePool[Request](1<<6, 1<<10, 2), // Buckets are [64, 128, 256, 512, 1024]. @@ -76,12 +81,9 @@ func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, met return q } -// Enqueue puts the request into the queue. 
MaxQueries is tenant-specific value that specifies how many queriers can -// this tenant use (zero or negative = all queriers). It is passed to each Enqueue, because it can change -// between calls. -// +// Enqueue puts the request into the queue. // If request is successfully enqueued, successFn is called with the lock held, before any querier can receive the request. -func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQueriers int, successFn func()) error { +func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, successFn func()) error { q.mtx.Lock() defer q.mtx.Unlock() @@ -89,10 +91,9 @@ func (q *RequestQueue) Enqueue(tenant string, path []string, req Request, maxQue return ErrStopped } - queue := q.queues.getOrAddQueue(tenant, path, maxQueriers) - if queue == nil { - // This can only happen if tenant is "". - return errors.New("no queue found") + queue, err := q.queues.getOrAddQueue(tenant, path) + if err != nil { + return fmt.Errorf("no queue found: %w", err) } // Optimistically increase queue counter for tenant instead of doing separate @@ -175,9 +176,7 @@ FindQueue: // We need to wait if there are no tenants, or no pending requests for given querier. for (q.queues.hasNoTenantQueues() || querierWait) && ctx.Err() == nil && !q.stopped { querierWait = false - start := time.Now() q.cond.Wait(ctx) - q.metrics.querierWaitTime.WithLabelValues(consumerID).Observe(time.Since(start).Seconds()) } if q.stopped { diff --git a/pkg/queue/queue_test.go b/pkg/queue/queue_test.go index a2cb42441c02e..623e240733886 100644 --- a/pkg/queue/queue_test.go +++ b/pkg/queue/queue_test.go @@ -47,7 +47,7 @@ func BenchmarkGetNextRequest(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) for n := 0; n < b.N; n++ { - queue := NewRequestQueue(maxOutstandingPerTenant, 0, NewMetrics(nil, constants.Loki, "query_scheduler")) + queue := NewRequestQueue(maxOutstandingPerTenant, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler")) queues = append(queues, queue) for ix := 0; ix < queriers; ix++ { @@ -57,7 +57,7 @@ func BenchmarkGetNextRequest(b *testing.B) { for i := 0; i < maxOutstandingPerTenant; i++ { for j := 0; j < numTenants; j++ { userID := strconv.Itoa(j) - err := queue.Enqueue(userID, benchCase.fn(j), "request", 0, nil) + err := queue.Enqueue(userID, benchCase.fn(j), "request", nil) if err != nil { b.Fatal(err) } @@ -105,7 +105,7 @@ func BenchmarkQueueRequest(b *testing.B) { requests := make([]string, 0, numTenants) for n := 0; n < b.N; n++ { - q := NewRequestQueue(maxOutstandingPerTenant, 0, NewMetrics(nil, constants.Loki, "query_scheduler")) + q := NewRequestQueue(maxOutstandingPerTenant, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler")) for ix := 0; ix < queriers; ix++ { q.RegisterConsumerConnection(fmt.Sprintf("querier-%d", ix)) @@ -123,7 +123,7 @@ func BenchmarkQueueRequest(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < maxOutstandingPerTenant; i++ { for j := 0; j < numTenants; j++ { - err := queues[n].Enqueue(users[j], nil, requests[j], 0, nil) + err := queues[n].Enqueue(users[j], nil, requests[j], nil) if err != nil { b.Fatal(err) } @@ -135,7 +135,7 @@ func BenchmarkQueueRequest(b *testing.B) { func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBecauseQuerierHasBeenForgotten(t *testing.T) { const forgetDelay = 3 * time.Second - queue := NewRequestQueue(1, forgetDelay, NewMetrics(nil, constants.Loki, "query_scheduler")) + queue := NewRequestQueue(1, forgetDelay, 
&mockQueueLimits{maxConsumers: 1}, NewMetrics(nil, constants.Loki, "query_scheduler")) // Start the queue service. ctx := context.Background() @@ -162,7 +162,7 @@ func TestRequestQueue_GetNextRequestForQuerier_ShouldGetRequestAfterReshardingBe // Enqueue a request from an user which would be assigned to querier-1. // NOTE: "user-1" hash falls in the querier-1 shard. - require.NoError(t, queue.Enqueue("user-1", nil, "request", 1, nil)) + require.NoError(t, queue.Enqueue("user-1", nil, "request", nil)) startTime := time.Now() querier2wg.Wait() @@ -306,17 +306,17 @@ func TestContextCond(t *testing.T) { func TestMaxQueueSize(t *testing.T) { t.Run("queue size is tracked per tenant", func(t *testing.T) { maxSize := 3 - queue := NewRequestQueue(maxSize, 0, NewMetrics(nil, constants.Loki, "query_scheduler")) + queue := NewRequestQueue(maxSize, 0, noQueueLimits, NewMetrics(nil, constants.Loki, "query_scheduler")) queue.RegisterConsumerConnection("querier") // enqueue maxSize items with different actors // different actors have individual channels with maxSize length - assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 1, 0, nil)) - assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 2, 0, nil)) - assert.NoError(t, queue.Enqueue("tenant", []string{"user-c"}, 3, 0, nil)) + assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 1, nil)) + assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 2, nil)) + assert.NoError(t, queue.Enqueue("tenant", []string{"user-c"}, 3, nil)) // max queue length per tenant is tracked globally for all actors within a tenant - err := queue.Enqueue("tenant", []string{"user-a"}, 4, 0, nil) + err := queue.Enqueue("tenant", []string{"user-a"}, 4, nil) assert.Equal(t, err, ErrTooManyRequests) // dequeue and enqueue some items @@ -325,10 +325,10 @@ func TestMaxQueueSize(t *testing.T) { _, _, err = queue.Dequeue(context.Background(), StartIndexWithLocalQueue, "querier") assert.NoError(t, err) - assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 4, 0, nil)) - assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 5, 0, nil)) + assert.NoError(t, queue.Enqueue("tenant", []string{"user-a"}, 4, nil)) + assert.NoError(t, queue.Enqueue("tenant", []string{"user-b"}, 5, nil)) - err = queue.Enqueue("tenant", []string{"user-c"}, 6, 0, nil) + err = queue.Enqueue("tenant", []string{"user-c"}, 6, nil) assert.Equal(t, err, ErrTooManyRequests) }) } diff --git a/pkg/queue/tenant_queues.go b/pkg/queue/tenant_queues.go index 46e8a999fb88e..69fac6ed60a01 100644 --- a/pkg/queue/tenant_queues.go +++ b/pkg/queue/tenant_queues.go @@ -6,11 +6,17 @@ package queue import ( + "fmt" "math/rand" "sort" "time" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/tenant" + "github.com/grafana/loki/pkg/util" + util_log "github.com/grafana/loki/pkg/util/log" + "github.com/grafana/loki/pkg/util/validation" ) type intPointerMap map[string]*int @@ -67,6 +73,8 @@ type tenantQueues struct { // sortedConsumer list of consumer IDs, used when creating per-user shard. sortedConsumers []string + + limits Limits } type Queue interface { @@ -87,16 +95,15 @@ type tenantQueue struct { *TreeQueue // If not nil, only these consumers can handle user requests. If nil, all consumers can. - // We set this to nil if number of available consumers <= maxQueriers. - consumers map[string]struct{} - maxQueriers int + // We set this to nil if number of available consumers <= MaxConsumers. + consumers map[string]struct{} // Seed for shuffle sharding of consumers. 
This seed is based on userID only and is therefore consistent // between different frontends. seed int64 } -func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration) *tenantQueues { +func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration, limits Limits) *tenantQueues { mm := &Mapping[*tenantQueue]{} mm.Init(64) return &tenantQueues{ @@ -106,6 +113,7 @@ func newTenantQueues(maxUserQueueSize int, forgetDelay time.Duration) *tenantQue forgetDelay: forgetDelay, consumers: map[string]*consumer{}, sortedConsumers: nil, + limits: limits, } } @@ -118,37 +126,42 @@ func (q *tenantQueues) deleteQueue(tenant string) { } // Returns existing or new queue for a tenant. -// MaxQueriers is used to compute which consumers should handle requests for this tenant. -// If maxQueriers is <= 0, all consumers can handle this tenant's requests. -// If maxQueriers has changed since the last call, consumers for this are recomputed. -func (q *tenantQueues) getOrAddQueue(tenant string, path []string, maxQueriers int) Queue { +func (q *tenantQueues) getOrAddQueue(tenantID string, path []string) (Queue, error) { // Empty tenant is not allowed, as that would break our tenants list ("" is used for free spot). - if tenant == "" { - return nil + if tenantID == "" { + return nil, fmt.Errorf("empty tenant is not allowed") } - if maxQueriers < 0 { - maxQueriers = 0 + // extract tenantIDs to compute limits for multi-tenant queries + tenantIDs, err := tenant.TenantIDsFromOrgID(tenantID) + if err != nil { + return nil, fmt.Errorf("extract tenant ids: %w", err) } - uq := q.mapping.GetByKey(tenant) + uq := q.mapping.GetByKey(tenantID) if uq == nil { uq = &tenantQueue{ - seed: util.ShuffleShardSeed(tenant, ""), + seed: util.ShuffleShardSeed(tenantID, ""), } - uq.TreeQueue = newTreeQueue(q.maxUserQueueSize, tenant) - q.mapping.Put(tenant, uq) + uq.TreeQueue = newTreeQueue(q.maxUserQueueSize, tenantID) + q.mapping.Put(tenantID, uq) } - if uq.maxQueriers != maxQueriers { - uq.maxQueriers = maxQueriers - uq.consumers = shuffleConsumersForTenants(uq.seed, maxQueriers, q.sortedConsumers, nil) + consumersToSelect := validation.SmallestPositiveNonZeroIntPerTenant( + tenantIDs, + func(tenantID string) int { + return q.limits.MaxConsumers(tenantID, len(q.sortedConsumers)) + }, + ) + + if len(uq.consumers) != consumersToSelect { + uq.consumers = shuffleConsumersForTenants(uq.seed, consumersToSelect, q.sortedConsumers, nil) } if len(path) == 0 { - return uq + return uq, nil } - return uq.add(path) + return uq.add(path), nil } // Finds next queue for the consumer. To support fair scheduling between users, client is expected @@ -294,8 +307,23 @@ func (q *tenantQueues) forgetDisconnectedConsumers(now time.Time) int { func (q *tenantQueues) recomputeUserConsumers() { scratchpad := make([]string, 0, len(q.sortedConsumers)) - for _, uq := range q.mapping.Values() { - uq.consumers = shuffleConsumersForTenants(uq.seed, uq.maxQueriers, q.sortedConsumers, scratchpad) + for _, tenantID := range q.mapping.Keys() { + if uq := q.mapping.GetByKey(tenantID); uq != nil { + tenantIDs, err := tenant.TenantIDsFromOrgID(tenantID) + if err != nil { + // this is unlikely to happen since we do tenantID validation when creating the queue. 
+ level.Error(util_log.Logger).Log("msg", "failed to shuffle consumers because of errors in tenantID extraction", "tenant", tenantID, "error", err) + continue + } + + consumersToSelect := validation.SmallestPositiveNonZeroIntPerTenant( + tenantIDs, + func(tenantID string) int { + return q.limits.MaxConsumers(tenantID, len(q.sortedConsumers)) + }, + ) + uq.consumers = shuffleConsumersForTenants(uq.seed, consumersToSelect, q.sortedConsumers, scratchpad) + } } } diff --git a/pkg/queue/tenant_queues_test.go b/pkg/queue/tenant_queues_test.go index 95f2a67963aa7..4f49b8233304d 100644 --- a/pkg/queue/tenant_queues_test.go +++ b/pkg/queue/tenant_queues_test.go @@ -15,53 +15,57 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/scheduler/limits" ) +var noQueueLimits = limits.NewQueueLimits(nil) + func TestQueues(t *testing.T) { - uq := newTenantQueues(0, 0) + uq := newTenantQueues(0, 0, noQueueLimits) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - uq.addConsumerToConnection("querier-1") - uq.addConsumerToConnection("querier-2") + uq.addConsumerToConnection("consumer-1") + uq.addConsumerToConnection("consumer-2") - q, u, lastUserIndex := uq.getNextQueueForConsumer(-1, "querier-1") + q, u, lastUserIndex := uq.getNextQueueForConsumer(-1, "consumer-1") assert.Nil(t, q) assert.Equal(t, "", u) // Add queues: [one] - qOne := getOrAdd(t, uq, "one", 0) - lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qOne, qOne) + qOne := getOrAdd(t, uq, "one") + lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qOne, qOne) // [one two] - qTwo := getOrAdd(t, uq, "two", 0) + qTwo := getOrAdd(t, uq, "two") assert.NotEqual(t, qOne, qTwo) - lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qOne, qTwo, qOne) - confirmOrderForQuerier(t, uq, "querier-2", -1, qOne, qTwo, qOne) + lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qTwo, qOne, qTwo, qOne) + confirmOrderForConsumer(t, uq, "consumer-2", -1, qOne, qTwo, qOne) // [one two three] // confirm fifo by adding a third queue and iterating to it - qThree := getOrAdd(t, uq, "three", 0) + qThree := getOrAdd(t, uq, "three") - lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qThree, qOne) + lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qTwo, qThree, qOne) // Remove one: ["" two three] uq.deleteQueue("one") assert.NoError(t, isConsistent(uq)) - lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qTwo, qThree, qTwo) + lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qTwo, qThree, qTwo) // "four" is added at the beginning of the list: [four two three] - qFour := getOrAdd(t, uq, "four", 0) + qFour := getOrAdd(t, uq, "four") - lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qThree, qFour, qTwo, qThree) + lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qThree, qFour, qTwo, qThree) // Remove two: [four "" three] uq.deleteQueue("two") assert.NoError(t, isConsistent(uq)) - lastUserIndex = confirmOrderForQuerier(t, uq, "querier-1", lastUserIndex, qFour, qThree, qFour) + lastUserIndex = confirmOrderForConsumer(t, uq, "consumer-1", lastUserIndex, qFour, qThree, qFour) // Remove three: [four] uq.deleteQueue("three") @@ -71,55 +75,55 @@ func TestQueues(t *testing.T) { uq.deleteQueue("four") assert.NoError(t, isConsistent(uq)) - q, _, _ = 
uq.getNextQueueForConsumer(lastUserIndex, "querier-1") + q, _, _ = uq.getNextQueueForConsumer(lastUserIndex, "consumer-1") assert.Nil(t, q) } -func TestQueuesOnTerminatingQuerier(t *testing.T) { - uq := newTenantQueues(0, 0) +func TestQueuesOnTerminatingConsumer(t *testing.T) { + uq := newTenantQueues(0, 0, noQueueLimits) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - uq.addConsumerToConnection("querier-1") - uq.addConsumerToConnection("querier-2") + uq.addConsumerToConnection("consumer-1") + uq.addConsumerToConnection("consumer-2") // Add queues: [one, two] - qOne := getOrAdd(t, uq, "one", 0) - qTwo := getOrAdd(t, uq, "two", 0) - confirmOrderForQuerier(t, uq, "querier-1", -1, qOne, qTwo, qOne, qTwo) - confirmOrderForQuerier(t, uq, "querier-2", -1, qOne, qTwo, qOne, qTwo) - - // After notify shutdown for querier-2, it's expected to own no queue. - uq.notifyQuerierShutdown("querier-2") - q, u, _ := uq.getNextQueueForConsumer(-1, "querier-2") + qOne := getOrAdd(t, uq, "one") + qTwo := getOrAdd(t, uq, "two") + confirmOrderForConsumer(t, uq, "consumer-1", -1, qOne, qTwo, qOne, qTwo) + confirmOrderForConsumer(t, uq, "consumer-2", -1, qOne, qTwo, qOne, qTwo) + + // After notify shutdown for consumer-2, it's expected to own no queue. + uq.notifyQuerierShutdown("consumer-2") + q, u, _ := uq.getNextQueueForConsumer(-1, "consumer-2") assert.Nil(t, q) assert.Equal(t, "", u) - // However, querier-1 still get queues because it's still running. - confirmOrderForQuerier(t, uq, "querier-1", -1, qOne, qTwo, qOne, qTwo) + // However, consumer-1 still gets queues because it's still running. + confirmOrderForConsumer(t, uq, "consumer-1", -1, qOne, qTwo, qOne, qTwo) - // After disconnecting querier-2, it's expected to own no queue. - uq.removeConsumer("querier-2") - q, u, _ = uq.getNextQueueForConsumer(-1, "querier-2") + // After disconnecting consumer-2, it's expected to own no queue. + uq.removeConsumer("consumer-2") + q, u, _ = uq.getNextQueueForConsumer(-1, "consumer-2") assert.Nil(t, q) assert.Equal(t, "", u) } -func TestQueuesWithQueriers(t *testing.T) { - uq := newTenantQueues(0, 0) +func TestQueuesWithConsumers(t *testing.T) { + maxConsumers := 5 + uq := newTenantQueues(0, 0, &mockQueueLimits{maxConsumers: maxConsumers}) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - queriers := 30 + consumers := 30 users := 1000 - maxQueriersPerUser := 5 - // Add some queriers. - for ix := 0; ix < queriers; ix++ { - qid := fmt.Sprintf("querier-%d", ix) + // Add some consumers. + for ix := 0; ix < consumers; ix++ { + qid := fmt.Sprintf("consumer-%d", ix) uq.addConsumerToConnection(qid) - // No querier has any queues yet. + // No consumer has any queues yet. q, u, _ := uq.getNextQueueForConsumer(-1, qid) assert.Nil(t, q) assert.Equal(t, "", u) @@ -130,19 +134,19 @@ func TestQueuesWithQueriers(t *testing.T) { // Add user queues. for u := 0; u < users; u++ { uid := fmt.Sprintf("user-%d", u) - getOrAdd(t, uq, uid, maxQueriersPerUser) + getOrAdd(t, uq, uid) - // Verify it has maxQueriersPerUser queriers assigned now. + // Verify it has maxConsumers consumers assigned now. qs := uq.mapping.GetByKey(uid).consumers - assert.Equal(t, maxQueriersPerUser, len(qs)) + assert.Equal(t, maxConsumers, len(qs)) } - // After adding all users, verify results. For each querier, find out how many different users it handles, + // After adding all users, verify results. For each consumer, find out how many different users it handles, // and compute mean and stdDev.
- queriersMap := make(map[string]int) + consumerMap := make(map[string]int) - for q := 0; q < queriers; q++ { - qid := fmt.Sprintf("querier-%d", q) + for q := 0; q < consumers; q++ { + qid := fmt.Sprintf("consumer-%d", q) lastUserIndex := StartIndex for { @@ -151,25 +155,25 @@ func TestQueuesWithQueriers(t *testing.T) { break } lastUserIndex = newIx - queriersMap[qid]++ + consumerMap[qid]++ } } mean := float64(0) - for _, c := range queriersMap { + for _, c := range consumerMap { mean += float64(c) } - mean = mean / float64(len(queriersMap)) + mean = mean / float64(len(consumerMap)) stdDev := float64(0) - for _, c := range queriersMap { + for _, c := range consumerMap { d := float64(c) - mean stdDev += (d * d) } - stdDev = math.Sqrt(stdDev / float64(len(queriersMap))) + stdDev = math.Sqrt(stdDev / float64(len(consumerMap))) t.Log("mean:", mean, "stddev:", stdDev) - assert.InDelta(t, users*maxQueriersPerUser/queriers, mean, 1) + assert.InDelta(t, users*maxConsumers/consumers, mean, 1) assert.InDelta(t, stdDev, 0, mean*0.2) } @@ -183,7 +187,7 @@ func TestQueuesConsistency(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - uq := newTenantQueues(0, testData.forgetDelay) + uq := newTenantQueues(0, testData.forgetDelay, &mockQueueLimits{maxConsumers: 3}) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) @@ -196,25 +200,27 @@ func TestQueuesConsistency(t *testing.T) { for i := 0; i < 10000; i++ { switch r.Int() % 6 { case 0: - assert.NotNil(t, uq.getOrAddQueue(generateTenant(r), generateActor(r), 3)) + q, err := uq.getOrAddQueue(generateTenant(r), generateActor(r)) + assert.NoError(t, err) + assert.NotNil(t, q) case 1: - qid := generateQuerier(r) + qid := generateConsumer(r) _, _, luid := uq.getNextQueueForConsumer(lastUserIndexes[qid], qid) lastUserIndexes[qid] = luid case 2: uq.deleteQueue(generateTenant(r)) case 3: - q := generateQuerier(r) + q := generateConsumer(r) uq.addConsumerToConnection(q) conns[q]++ case 4: - q := generateQuerier(r) + q := generateConsumer(r) if conns[q] > 0 { uq.removeConsumerConnection(q, time.Now()) conns[q]-- } case 5: - q := generateQuerier(r) + q := generateConsumer(r) uq.notifyQuerierShutdown(q) } @@ -226,166 +232,166 @@ func TestQueuesConsistency(t *testing.T) { func TestQueues_ForgetDelay(t *testing.T) { const ( - forgetDelay = time.Minute - maxQueriersPerUser = 1 - numUsers = 100 + forgetDelay = time.Minute + maxConsumers = 1 + numUsers = 100 ) now := time.Now() - uq := newTenantQueues(0, forgetDelay) + uq := newTenantQueues(0, forgetDelay, &mockQueueLimits{maxConsumers: maxConsumers}) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - // 3 queriers open 2 connections each. + // 3 consumers open 2 connections each. for i := 1; i <= 3; i++ { - uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) - uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i)) } // Add user queues. for i := 0; i < numUsers; i++ { userID := fmt.Sprintf("user-%d", i) - getOrAdd(t, uq, userID, maxQueriersPerUser) + getOrAdd(t, uq, userID) } - // We expect querier-1 to have some users. - querier1Users := getUsersByQuerier(uq, "querier-1") - require.NotEmpty(t, querier1Users) + // We expect consumer-1 to have some users. + consumer1Users := getUsersByConsumer(uq, "consumer-1") + require.NotEmpty(t, consumer1Users) - // Gracefully shutdown querier-1. 
- uq.removeConsumerConnection("querier-1", now.Add(20*time.Second)) - uq.removeConsumerConnection("querier-1", now.Add(21*time.Second)) - uq.notifyQuerierShutdown("querier-1") + // Gracefully shutdown consumer-1. + uq.removeConsumerConnection("consumer-1", now.Add(20*time.Second)) + uq.removeConsumerConnection("consumer-1", now.Add(21*time.Second)) + uq.notifyQuerierShutdown("consumer-1") - // We expect querier-1 has been removed. - assert.NotContains(t, uq.consumers, "querier-1") + // We expect consumer-1 has been removed. + assert.NotContains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - // We expect querier-1 users have been shuffled to other queriers. - for _, userID := range querier1Users { - assert.Contains(t, append(getUsersByQuerier(uq, "querier-2"), getUsersByQuerier(uq, "querier-3")...), userID) + // We expect consumer-1 users have been shuffled to other consumers. + for _, userID := range consumer1Users { + assert.Contains(t, append(getUsersByConsumer(uq, "consumer-2"), getUsersByConsumer(uq, "consumer-3")...), userID) } - // Querier-1 reconnects. - uq.addConsumerToConnection("querier-1") - uq.addConsumerToConnection("querier-1") + // Consumer-1 reconnects. + uq.addConsumerToConnection("consumer-1") + uq.addConsumerToConnection("consumer-1") - // We expect the initial querier-1 users have got back to querier-1. - for _, userID := range querier1Users { - assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID) + // We expect the initial consumer-1 users have got back to consumer-1. + for _, userID := range consumer1Users { + assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID) } - // Querier-1 abruptly terminates (no shutdown notification received). - uq.removeConsumerConnection("querier-1", now.Add(40*time.Second)) - uq.removeConsumerConnection("querier-1", now.Add(41*time.Second)) + // Consumer-1 abruptly terminates (no shutdown notification received). + uq.removeConsumerConnection("consumer-1", now.Add(40*time.Second)) + uq.removeConsumerConnection("consumer-1", now.Add(41*time.Second)) - // We expect querier-1 has NOT been removed. - assert.Contains(t, uq.consumers, "querier-1") + // We expect consumer-1 has NOT been removed. + assert.Contains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - // We expect the querier-1 users have not been shuffled to other queriers. - for _, userID := range querier1Users { - assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID) + // We expect the consumer-1 users have not been shuffled to other consumers. + for _, userID := range consumer1Users { + assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID) } - // Try to forget disconnected queriers, but querier-1 forget delay hasn't passed yet. + // Try to forget disconnected consumers, but consumer-1 forget delay hasn't passed yet. 
uq.forgetDisconnectedConsumers(now.Add(90 * time.Second)) - assert.Contains(t, uq.consumers, "querier-1") + assert.Contains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - for _, userID := range querier1Users { - assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID) + for _, userID := range consumer1Users { + assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID) } - // Try to forget disconnected queriers. This time querier-1 forget delay has passed. + // Try to forget disconnected consumers. This time consumer-1 forget delay has passed. uq.forgetDisconnectedConsumers(now.Add(105 * time.Second)) - assert.NotContains(t, uq.consumers, "querier-1") + assert.NotContains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - // We expect querier-1 users have been shuffled to other queriers. - for _, userID := range querier1Users { - assert.Contains(t, append(getUsersByQuerier(uq, "querier-2"), getUsersByQuerier(uq, "querier-3")...), userID) + // We expect consumer-1 users have been shuffled to other consumers. + for _, userID := range consumer1Users { + assert.Contains(t, append(getUsersByConsumer(uq, "consumer-2"), getUsersByConsumer(uq, "consumer-3")...), userID) } } -func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForgetDelayIsPassed(t *testing.T) { +func TestQueues_ForgetDelay_ShouldCorrectlyHandleConsumerReconnectingBeforeForgetDelayIsPassed(t *testing.T) { const ( - forgetDelay = time.Minute - maxQueriersPerUser = 1 - numUsers = 100 + forgetDelay = time.Minute + maxConsumers = 1 + numUsers = 100 ) now := time.Now() - uq := newTenantQueues(0, forgetDelay) + uq := newTenantQueues(0, forgetDelay, &mockQueueLimits{maxConsumers: maxConsumers}) assert.NotNil(t, uq) assert.NoError(t, isConsistent(uq)) - // 3 queriers open 2 connections each. + // 3 consumers open 2 connections each. for i := 1; i <= 3; i++ { - uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) - uq.addConsumerToConnection(fmt.Sprintf("querier-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i)) + uq.addConsumerToConnection(fmt.Sprintf("consumer-%d", i)) } // Add user queues. for i := 0; i < numUsers; i++ { userID := fmt.Sprintf("user-%d", i) - getOrAdd(t, uq, userID, maxQueriersPerUser) + getOrAdd(t, uq, userID) } - // We expect querier-1 to have some users. - querier1Users := getUsersByQuerier(uq, "querier-1") - require.NotEmpty(t, querier1Users) + // We expect consumer-1 to have some users. + consumer1Users := getUsersByConsumer(uq, "consumer-1") + require.NotEmpty(t, consumer1Users) - // Querier-1 abruptly terminates (no shutdown notification received). - uq.removeConsumerConnection("querier-1", now.Add(40*time.Second)) - uq.removeConsumerConnection("querier-1", now.Add(41*time.Second)) + // Consumer-1 abruptly terminates (no shutdown notification received). + uq.removeConsumerConnection("consumer-1", now.Add(40*time.Second)) + uq.removeConsumerConnection("consumer-1", now.Add(41*time.Second)) - // We expect querier-1 has NOT been removed. - assert.Contains(t, uq.consumers, "querier-1") + // We expect consumer-1 has NOT been removed. 
+ assert.Contains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - // We expect the querier-1 users have not been shuffled to other queriers. - for _, userID := range querier1Users { - assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID) + // We expect the consumer-1 users have not been shuffled to other consumers. + for _, userID := range consumer1Users { + assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID) } - // Try to forget disconnected queriers, but querier-1 forget delay hasn't passed yet. + // Try to forget disconnected consumers, but consumer-1 forget delay hasn't passed yet. uq.forgetDisconnectedConsumers(now.Add(90 * time.Second)) - // Querier-1 reconnects. - uq.addConsumerToConnection("querier-1") - uq.addConsumerToConnection("querier-1") + // Consumer-1 reconnects. + uq.addConsumerToConnection("consumer-1") + uq.addConsumerToConnection("consumer-1") - assert.Contains(t, uq.consumers, "querier-1") + assert.Contains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - // We expect the querier-1 users have not been shuffled to other queriers. - for _, userID := range querier1Users { - assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID) + // We expect the consumer-1 users have not been shuffled to other consumers. + for _, userID := range consumer1Users { + assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID) } - // Try to forget disconnected queriers far in the future, but there's no disconnected querier. + // Try to forget disconnected consumers far in the future, but there's no disconnected consumer. 
uq.forgetDisconnectedConsumers(now.Add(200 * time.Second)) - assert.Contains(t, uq.consumers, "querier-1") + assert.Contains(t, uq.consumers, "consumer-1") assert.NoError(t, isConsistent(uq)) - for _, userID := range querier1Users { - assert.Contains(t, getUsersByQuerier(uq, "querier-1"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-2"), userID) - assert.NotContains(t, getUsersByQuerier(uq, "querier-3"), userID) + for _, userID := range consumer1Users { + assert.Contains(t, getUsersByConsumer(uq, "consumer-1"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-2"), userID) + assert.NotContains(t, getUsersByConsumer(uq, "consumer-3"), userID) } } @@ -397,24 +403,27 @@ func generateTenant(r *rand.Rand) string { return fmt.Sprint("tenant-", r.Int()%5) } -func generateQuerier(r *rand.Rand) string { - return fmt.Sprint("querier-", r.Int()%5) +func generateConsumer(r *rand.Rand) string { + return fmt.Sprint("consumer-", r.Int()%5) } -func getOrAdd(t *testing.T, uq *tenantQueues, tenant string, maxQueriers int) Queue { +func getOrAdd(t *testing.T, uq *tenantQueues, tenant string) Queue { actor := []string{} - q := uq.getOrAddQueue(tenant, actor, maxQueriers) + q, err := uq.getOrAddQueue(tenant, actor) + assert.NoError(t, err) assert.NotNil(t, q) assert.NoError(t, isConsistent(uq)) - assert.Equal(t, q, uq.getOrAddQueue(tenant, actor, maxQueriers)) + q2, err := uq.getOrAddQueue(tenant, actor) + assert.NoError(t, err) + assert.Equal(t, q, q2) return q } -func confirmOrderForQuerier(t *testing.T, uq *tenantQueues, querier string, lastUserIndex QueueIndex, qs ...Queue) QueueIndex { +func confirmOrderForConsumer(t *testing.T, uq *tenantQueues, consumer string, lastUserIndex QueueIndex, qs ...Queue) QueueIndex { t.Helper() var n Queue for _, q := range qs { - n, _, lastUserIndex = uq.getNextQueueForConsumer(lastUserIndex, querier) + n, _, lastUserIndex = uq.getNextQueueForConsumer(lastUserIndex, consumer) assert.Equal(t, q, n) assert.NoError(t, isConsistent(uq)) } @@ -423,7 +432,7 @@ func confirmOrderForQuerier(t *testing.T, uq *tenantQueues, querier string, last func isConsistent(uq *tenantQueues) error { if len(uq.sortedConsumers) != len(uq.consumers) { - return fmt.Errorf("inconsistent number of sorted queriers and querier connections") + return fmt.Errorf("inconsistent number of sorted consumers and consumer connections") } uc := 0 @@ -441,16 +450,17 @@ func isConsistent(uq *tenantQueues) error { uc++ - if q.maxQueriers == 0 && q.consumers != nil { - return fmt.Errorf("user %s has queriers, but maxQueriers=0", u) + maxConsumers := uq.limits.MaxConsumers(u, len(uq.consumers)) + if maxConsumers == 0 && q.consumers != nil { + return fmt.Errorf("consumers for user %s should be nil when no limits are set (when MaxConsumers is 0)", u) } - if q.maxQueriers > 0 && len(uq.sortedConsumers) <= q.maxQueriers && q.consumers != nil { - return fmt.Errorf("user %s has queriers set despite not enough queriers available", u) + if maxConsumers > 0 && len(uq.sortedConsumers) <= maxConsumers && q.consumers != nil { + return fmt.Errorf("consumers for user %s should be nil when MaxConsumers allowed is higher than the available consumers", u) } - if q.maxQueriers > 0 && len(uq.sortedConsumers) > q.maxQueriers && len(q.consumers) != q.maxQueriers { - return fmt.Errorf("user %s has incorrect number of queriers, expected=%d, got=%d", u, len(q.consumers), q.maxQueriers) + if maxConsumers > 0 && len(uq.sortedConsumers) > maxConsumers && len(q.consumers) != maxConsumers { + return 
fmt.Errorf("user %s has incorrect number of consumers, expected=%d, got=%d", u, maxConsumers, len(q.consumers)) } } @@ -461,67 +471,75 @@ func isConsistent(uq *tenantQueues) error { return nil } -// getUsersByQuerier returns the list of users handled by the provided querierID. -func getUsersByQuerier(queues *tenantQueues, querierID string) []string { +// getUsersByConsumer returns the list of users handled by the provided consumerID. +func getUsersByConsumer(queues *tenantQueues, consumerID string) []string { var userIDs []string for _, userID := range queues.mapping.Keys() { q := queues.mapping.GetByKey(userID) if q.consumers == nil { - // If it's nil then all queriers can handle this user. + // If it's nil then all consumers can handle this user. userIDs = append(userIDs, userID) continue } - if _, ok := q.consumers[querierID]; ok { + if _, ok := q.consumers[consumerID]; ok { userIDs = append(userIDs, userID) } } return userIDs } -func TestShuffleQueriers(t *testing.T) { - allQueriers := []string{"a", "b", "c", "d", "e"} +func TestShuffleConsumers(t *testing.T) { + allConsumers := []string{"a", "b", "c", "d", "e"} - require.Nil(t, shuffleConsumersForTenants(12345, 10, allQueriers, nil)) - require.Nil(t, shuffleConsumersForTenants(12345, len(allQueriers), allQueriers, nil)) + require.Nil(t, shuffleConsumersForTenants(12345, 10, allConsumers, nil)) + require.Nil(t, shuffleConsumersForTenants(12345, len(allConsumers), allConsumers, nil)) - r1 := shuffleConsumersForTenants(12345, 3, allQueriers, nil) + r1 := shuffleConsumersForTenants(12345, 3, allConsumers, nil) require.Equal(t, 3, len(r1)) // Same input produces same output. - r2 := shuffleConsumersForTenants(12345, 3, allQueriers, nil) + r2 := shuffleConsumersForTenants(12345, 3, allConsumers, nil) require.Equal(t, 3, len(r2)) require.Equal(t, r1, r2) } -func TestShuffleQueriersCorrectness(t *testing.T) { - const queriersCount = 100 +func TestShuffleConsumersCorrectness(t *testing.T) { + const consumersCount = 100 - var allSortedQueriers []string - for i := 0; i < queriersCount; i++ { - allSortedQueriers = append(allSortedQueriers, fmt.Sprintf("%d", i)) + var allSortedConsumers []string + for i := 0; i < consumersCount; i++ { + allSortedConsumers = append(allSortedConsumers, fmt.Sprintf("%d", i)) } - sort.Strings(allSortedQueriers) + sort.Strings(allSortedConsumers) r := rand.New(rand.NewSource(time.Now().UnixNano())) const tests = 1000 for i := 0; i < tests; i++ { - toSelect := r.Intn(queriersCount) + toSelect := r.Intn(consumersCount) if toSelect == 0 { toSelect = 3 } - selected := shuffleConsumersForTenants(r.Int63(), toSelect, allSortedQueriers, nil) + selected := shuffleConsumersForTenants(r.Int63(), toSelect, allSortedConsumers, nil) require.Equal(t, toSelect, len(selected)) - sort.Strings(allSortedQueriers) - prevQuerier := "" - for _, q := range allSortedQueriers { - require.True(t, prevQuerier < q, "non-unique querier") - prevQuerier = q + sort.Strings(allSortedConsumers) + prevConsumer := "" + for _, q := range allSortedConsumers { + require.True(t, prevConsumer < q, "non-unique consumer") + prevConsumer = q - ix := sort.SearchStrings(allSortedQueriers, q) - require.True(t, ix < len(allSortedQueriers) && allSortedQueriers[ix] == q, "selected querier is not between all queriers") + ix := sort.SearchStrings(allSortedConsumers, q) + require.True(t, ix < len(allSortedConsumers) && allSortedConsumers[ix] == q, "selected consumer is not between all consumers") } } } + +type mockQueueLimits struct { + maxConsumers int +} + +func (l 
*mockQueueLimits) MaxConsumers(_ string, _ int) int { + return l.maxConsumers +} diff --git a/pkg/scheduler/limits/definitions.go b/pkg/scheduler/limits/definitions.go index 2a00db7d4a6db..e2c2e26cca6f2 100644 --- a/pkg/scheduler/limits/definitions.go +++ b/pkg/scheduler/limits/definitions.go @@ -1,7 +1,46 @@ package limits +import ( + "math" +) // Limits needed for the Query Scheduler - interface used for decoupling. type Limits interface { // MaxQueriersPerUser returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. - MaxQueriersPerUser(user string) int + MaxQueriersPerUser(user string) uint + + // MaxQueryCapacity returns how much of the available query capacity can be used by this user. + MaxQueryCapacity(user string) float64 +} + +func NewQueueLimits(limits Limits) *QueueLimits { + return &QueueLimits{limits: limits} +} + +type QueueLimits struct { + limits Limits +} + +// MaxConsumers is used to compute how many of the available queriers are allowed to handle requests for a given tenant. +// Returns the smaller of frontend.max-queriers-per-tenant and ceil(querier_replicas * frontend.max-query-capacity) when both +// limits are configured, or whichever one of the two is set. +// 0 is returned when neither limit is applied. +func (c *QueueLimits) MaxConsumers(tenantID string, allConsumers int) int { + if c == nil || c.limits == nil { + return 0 + } + + maxQueriers := int(c.limits.MaxQueriersPerUser(tenantID)) + maxCapacity := c.limits.MaxQueryCapacity(tenantID) + + if maxCapacity == 0 { + return maxQueriers + } + + res := int(math.Ceil(float64(allConsumers) * maxCapacity)) + if maxQueriers != 0 && maxQueriers < res { + return maxQueriers + } + + return res } diff --git a/pkg/scheduler/limits/definitions_test.go b/pkg/scheduler/limits/definitions_test.go new file mode 100644 index 0000000000000..26139e2186900 --- /dev/null +++ b/pkg/scheduler/limits/definitions_test.go @@ -0,0 +1,78 @@ +package limits + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestQueueLimitsMaxConsumers(t *testing.T) { + for name, tt := range map[string]struct { + limits *QueueLimits + expected int + }{ + "nil limits": { + limits: NewQueueLimits(nil), + expected: 0, + }, + "no limits": { + limits: NewQueueLimits(mockLimits{ + maxQueriers: 0, + maxQueryCapacity: 0, + }), + expected: 0, + }, + "enforce max queriers": { + limits: NewQueueLimits(mockLimits{ + maxQueriers: 5, + maxQueryCapacity: 0, + }), + expected: 5, + }, + "prefer max queriers over query capacity": { + limits: NewQueueLimits(mockLimits{ + maxQueriers: 5, + maxQueryCapacity: 1.0, + }), + expected: 5, + }, + "enforce max query capacity": { + limits: NewQueueLimits(mockLimits{ + maxQueriers: 0, + maxQueryCapacity: 0.5, + }), + expected: 5, + }, + "prefer query capacity over max queriers": { + limits: NewQueueLimits(mockLimits{ + maxQueriers: 5, + maxQueryCapacity: 0.4, + }), + expected: 4, + }, + "query capacity of 1.0": { + limits: NewQueueLimits(mockLimits{ + maxQueryCapacity: 1.0, + }), + expected: 10, + }, + } { + t.Run(name, func(t *testing.T) { + res := tt.limits.MaxConsumers("", 10) + assert.Equal(t, tt.expected, res) + }) + } +} + +type mockLimits struct { + maxQueriers uint + maxQueryCapacity float64 +} + +func (l mockLimits) MaxQueriersPerUser(_ string) uint { + return l.maxQueriers +} + +func (l mockLimits) MaxQueryCapacity(_ string) float64 { + return l.maxQueryCapacity +} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 305d47b17e571..5cd163ff0ffa1
100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -19,7 +19,6 @@ import ( "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" - "github.com/grafana/dskit/tenant" "github.com/grafana/dskit/user" otgrpc "github.com/opentracing-contrib/go-grpc" "github.com/opentracing/opentracing-go" @@ -38,7 +37,6 @@ import ( lokigrpc "github.com/grafana/loki/pkg/util/httpgrpc" lokihttpreq "github.com/grafana/loki/pkg/util/httpreq" lokiring "github.com/grafana/loki/pkg/util/ring" - "github.com/grafana/loki/pkg/util/validation" ) var errSchedulerIsNotRunning = errors.New("scheduler is not running") @@ -117,7 +115,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } // NewScheduler creates a new Scheduler. -func NewScheduler(cfg Config, limits Limits, log log.Logger, ringManager *lokiring.RingManager, registerer prometheus.Registerer, metricsNamespace string) (*Scheduler, error) { +func NewScheduler(cfg Config, schedulerLimits Limits, log log.Logger, ringManager *lokiring.RingManager, registerer prometheus.Registerer, metricsNamespace string) (*Scheduler, error) { if cfg.UseSchedulerRing { if ringManager == nil { return nil, errors.New("ring manager can't be empty when use_scheduler_ring is true") @@ -130,13 +128,13 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, ringManager *lokiri s := &Scheduler{ cfg: cfg, log: log, - limits: limits, + limits: schedulerLimits, pendingRequests: map[requestKey]*schedulerRequest{}, connectedFrontends: map[string]*connectedFrontend{}, queueMetrics: queueMetrics, ringManager: ringManager, - requestQueue: queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, queueMetrics), + requestQueue: queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, limits.NewQueueLimits(schedulerLimits), queueMetrics), } s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ @@ -353,13 +351,6 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr req.queueTime = now req.ctxCancel = cancel - // aggregate the max queriers limit in the case of a multi tenant query - tenantIDs, err := tenant.TenantIDsFromOrgID(req.tenantID) - if err != nil { - return err - } - maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, s.limits.MaxQueriersPerUser) - var queuePath []string if s.cfg.MaxQueueHierarchyLevels > 0 { queuePath = msg.QueuePath @@ -378,7 +369,7 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr } s.activeUsers.UpdateUserTimestamp(req.tenantID, now) - return s.requestQueue.Enqueue(req.tenantID, queuePath, req, maxQueriers, func() { + return s.requestQueue.Enqueue(req.tenantID, queuePath, req, func() { shouldCancel = false s.pendingRequestsMu.Lock() diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index 693c983736e77..b154f18fba788 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -115,9 +115,7 @@ func newMetrics(r prometheus.Registerer, namespace, subsystem string) *metrics { } func clearCache(cache map[string]interface{}) { - for k := range cache { - delete(cache, k) - } + clear(cache) } // prefixedToken returns a byte slice with sufficient capacity for a chunk-ref prefixed token @@ -138,7 +136,7 @@ func prefixedToken(ngram int, chk logproto.ChunkRef) ([]byte, int) { } // PopulateSeriesWithBloom is intended to be called on the write path, and is used to 
populate the bloom filter for a given series. -func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) { +func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) error { startTime := time.Now().UnixMilli() clearCache(bt.cache) @@ -149,7 +147,6 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo tokenBuf, prefixLn := prefixedToken(bt.lineTokenizer.N, chunks[idx].ChunkRef) chunkTotalUncompressedSize += lc.UncompressedSize() - // TODO: error handling itr, err := lc.Iterator( context.Background(), time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps? @@ -158,8 +155,8 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo log.NewNoopPipeline().ForStream(chunks[idx].Metric), ) if err != nil { - level.Info(util_log.Logger).Log("chunk iterator cannot be created") - return + level.Error(util_log.Logger).Log("msg", "chunk iterator cannot be created", "err", err) + return err } defer itr.Close() @@ -218,6 +215,7 @@ func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBlo bt.metrics.bloomSize.Observe(float64(seriesWithBloom.Bloom.ScalableBloomFilter.Capacity() / eightBits)) bt.metrics.sbfCreationTime.Add(float64(endTime - startTime)) bt.metrics.chunkSize.Observe(float64(chunkTotalUncompressedSize)) + return nil } // n ≈ −m ln(1 − p). diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index ab6883e1e6043..4a3f62ccbefa8 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -122,7 +122,8 @@ func TestPopulateSeriesWithBloom(t *testing.T) { Series: &series, } - bt.PopulateSeriesWithBloom(&swb, chunks) + err := bt.PopulateSeriesWithBloom(&swb, chunks) + require.NoError(t, err) tokenizer := NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) itr := tokenizer.Tokens(testLine) for itr.Next() { @@ -131,8 +132,51 @@ func TestPopulateSeriesWithBloom(t *testing.T) { } } +func BenchmarkPopulateSeriesWithBloom(b *testing.B) { + for i := 0; i < b.N; i++ { + var testLine = lorem + lorem + lorem + bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip) + + sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8) + var lbsList []labels.Labels + lbsList = append(lbsList, labels.FromStrings("foo", "bar")) + + var fpList []model.Fingerprint + for i := range lbsList { + fpList = append(fpList, model.Fingerprint(lbsList[i].Hash())) + } + + var memChunks = make([]*chunkenc.MemChunk, 0) + memChunk0 := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000) + _ = memChunk0.Append(&push.Entry{ + Timestamp: time.Unix(0, 1), + Line: testLine, + }) + memChunks = append(memChunks, memChunk0) + + var chunks = make([]chunk.Chunk, 0) + for i := range memChunks { + chunks = append(chunks, chunk.NewChunk("user", fpList[i], lbsList[i], chunkenc.NewFacade(memChunks[i], 256000, 1500000), model.TimeFromUnixNano(0), model.TimeFromUnixNano(1))) + } + + bloom := Bloom{ + ScalableBloomFilter: *sbf, + } + series := Series{ + Fingerprint: model.Fingerprint(lbsList[0].Hash()), + } + swb := SeriesWithBloom{ + Bloom: &bloom, + Series: &series, + } + + err := bt.PopulateSeriesWithBloom(&swb, chunks) + require.NoError(b, err) + } +} + func BenchmarkMapClear(b *testing.B) { - bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, 
DefaultNGramLength, DefaultNGramSkip) + bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip) for i := 0; i < b.N; i++ { for k := 0; k < cacheSize; k++ { bt.cache[fmt.Sprint(k)] = k @@ -143,7 +187,7 @@ func BenchmarkMapClear(b *testing.B) { } func BenchmarkNewMap(b *testing.B) { - bt, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) + bt, _ := NewBloomTokenizer(prometheus.NewRegistry(), DefaultNGramLength, DefaultNGramSkip) for i := 0; i < b.N; i++ { for k := 0; k < cacheSize; k++ { bt.cache[fmt.Sprint(k)] = k diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go index d81840229294c..e3e1e065bf084 100644 --- a/pkg/storage/bloom/v1/tokenizer.go +++ b/pkg/storage/bloom/v1/tokenizer.go @@ -11,7 +11,8 @@ const ( func reassemble(buf []rune, ln, pos int, result []byte) []byte { result = result[:0] // Reset the result slice for i := 0; i < ln; i++ { - cur := (pos + i) % len(buf) + cur := pos % len(buf) + pos++ result = utf8.AppendRune(result, buf[cur]) } return result diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go new file mode 100644 index 0000000000000..0999ca3271068 --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/cache.go @@ -0,0 +1,467 @@ +package resultscache + +import ( + "context" + "fmt" + "net/http" + "sort" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/grafana/dskit/httpgrpc" + "github.com/opentracing/opentracing-go" + otlog "github.com/opentracing/opentracing-go/log" + "github.com/prometheus/common/model" + "github.com/uber/jaeger-client-go" + + "github.com/grafana/dskit/tenant" + + "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/util/math" + "github.com/grafana/loki/pkg/util/spanlogger" + "github.com/grafana/loki/pkg/util/validation" +) + +// ConstSplitter is a utility for using a constant split interval when determining cache keys +type ConstSplitter time.Duration + +// GenerateCacheKey generates a cache key based on the userID, Request and interval. +func (t ConstSplitter) GenerateCacheKey(_ context.Context, userID string, r Request) string { + currentInterval := r.GetStart().UnixMilli() / int64(time.Duration(t)/time.Millisecond) + return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval) +} + +// ShouldCacheReqFn checks whether the current request should go to cache or not. +// If not, just send the request to next handler. +type ShouldCacheReqFn func(ctx context.Context, r Request) bool + +// ShouldCacheResFn checks whether the current response should go to cache or not. +type ShouldCacheResFn func(ctx context.Context, r Request, res Response, maxCacheTime int64) bool + +// ParallelismForReqFn returns the parallelism for a given request. +type ParallelismForReqFn func(ctx context.Context, tenantIDs []string, r Request) int + +type ResultsCache struct { + logger log.Logger + next Handler + cache cache.Cache + limits Limits + splitter KeyGenerator + cacheGenNumberLoader CacheGenNumberLoader + retentionEnabled bool + extractor Extractor + minCacheExtent int64 // discard any cache extent smaller than this + merger ResponseMerger + shouldCacheReq ShouldCacheReqFn + shouldCacheRes ShouldCacheResFn + parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int +} + +// NewResultsCache creates results cache from config. 
+// The middleware caches results using a unique cache key for a given request (step,query,user) and interval. +// The cache assumes that each request length (end-start) is less than or equal to the interval. +// Each request starting from within the same interval will hit the same cache entry. +// If the cache doesn't have the entire duration of the request cached, it will query the uncached parts and append them to the cache entries. +// see `generateKey`. +func NewResultsCache( + logger log.Logger, + c cache.Cache, + next Handler, + keyGen KeyGenerator, + limits Limits, + merger ResponseMerger, + extractor Extractor, + shouldCacheReq ShouldCacheReqFn, + shouldCacheRes ShouldCacheResFn, + parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int, + cacheGenNumberLoader CacheGenNumberLoader, + retentionEnabled bool, +) *ResultsCache { + return &ResultsCache{ + logger: logger, + next: next, + cache: c, + limits: limits, + splitter: keyGen, + cacheGenNumberLoader: cacheGenNumberLoader, + retentionEnabled: retentionEnabled, + extractor: extractor, + minCacheExtent: (5 * time.Minute).Milliseconds(), + merger: merger, + shouldCacheReq: shouldCacheReq, + shouldCacheRes: shouldCacheRes, + parallelismForReq: parallelismForReq, + } +} + +func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) { + sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do") + defer sp.Finish() + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + + if s.shouldCacheReq != nil && !s.shouldCacheReq(ctx, r) { + return s.next.Do(ctx, r) + } + + if s.cacheGenNumberLoader != nil && s.retentionEnabled { + ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs)) + } + + var ( + key = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r) + extents []Extent + response Response + ) + + sp.LogKV( + "query", r.GetQuery(), + "step", time.UnixMilli(r.GetStep()), + "start", r.GetStart(), + "end", r.GetEnd(), + "key", key, + ) + + cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) } + maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) + maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) + if r.GetStart().UnixMilli() > maxCacheTime { + return s.next.Do(ctx, r) + } + + cached, ok := s.get(ctx, key) + if ok { + response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime) + } else { + response, extents, err = s.handleMiss(ctx, r, maxCacheTime) + } + + if err == nil && len(extents) > 0 { + extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents) + if err != nil { + return nil, err + } + s.put(ctx, key, extents) + } + + return response, err +} + +func (s ResultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) { + response, err := s.next.Do(ctx, r) + if err != nil { + return nil, nil, err + } + + if s.shouldCacheRes != nil && !s.shouldCacheRes(ctx, r, response, maxCacheTime) { + return response, []Extent{}, nil + } + + extent, err := toExtent(ctx, r, response) + if err != nil { + return nil, nil, err + } + + extents := []Extent{ + extent, + } + return response, extents, nil +} + +func (s ResultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) { + var ( + reqResps []RequestResponse + err error + ) + sp, ctx := opentracing.StartSpanFromContext(ctx,
"handleHit") + defer sp.Finish() + log := spanlogger.FromContext(ctx) + defer log.Finish() + + requests, responses, err := s.partition(r, extents) + if err != nil { + return nil, nil, err + } + if len(requests) == 0 { + response, err := s.merger.MergeResponse(responses...) + // No downstream requests so no need to write back to the cache. + return response, nil, err + } + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r)) + + if err != nil { + return nil, nil, err + } + + for _, reqResp := range reqResps { + responses = append(responses, reqResp.Response) + if s.shouldCacheRes != nil && !s.shouldCacheRes(ctx, r, reqResp.Response, maxCacheTime) { + continue + } + extent, err := toExtent(ctx, reqResp.Request, reqResp.Response) + if err != nil { + return nil, nil, err + } + extents = append(extents, extent) + } + sort.Slice(extents, func(i, j int) bool { + if extents[i].Start == extents[j].Start { + // as an optimization, for two extents starts at the same time, we + // put bigger extent at the front of the slice, which helps + // to reduce the amount of merge we have to do later. + return extents[i].End > extents[j].End + } + + return extents[i].Start < extents[j].Start + }) + + // Merge any extents - potentially overlapping + accumulator, err := newAccumulator(extents[0]) + if err != nil { + return nil, nil, err + } + mergedExtents := make([]Extent, 0, len(extents)) + + for i := 1; i < len(extents); i++ { + if accumulator.End+r.GetStep() < extents[i].Start { + mergedExtents, err = merge(mergedExtents, accumulator) + if err != nil { + return nil, nil, err + } + accumulator, err = newAccumulator(extents[i]) + if err != nil { + return nil, nil, err + } + continue + } + + if accumulator.End >= extents[i].End { + continue + } + + accumulator.TraceId = jaegerTraceID(ctx) + accumulator.End = extents[i].End + currentRes, err := extents[i].toResponse() + if err != nil { + return nil, nil, err + } + merged, err := s.merger.MergeResponse(accumulator.Response, currentRes) + if err != nil { + return nil, nil, err + } + accumulator.Response = merged + } + + mergedExtents, err = merge(mergedExtents, accumulator) + if err != nil { + return nil, nil, err + } + + response, err := s.merger.MergeResponse(responses...) + return response, mergedExtents, err +} + +type accumulator struct { + Response + Extent +} + +func merge(extents []Extent, acc *accumulator) ([]Extent, error) { + anyResp, err := types.MarshalAny(acc.Response) + if err != nil { + return nil, err + } + return append(extents, Extent{ + Start: acc.Extent.Start, + End: acc.Extent.End, + Response: anyResp, + TraceId: acc.Extent.TraceId, + }), nil +} + +func newAccumulator(base Extent) (*accumulator, error) { + res, err := base.toResponse() + if err != nil { + return nil, err + } + return &accumulator{ + Response: res, + Extent: base, + }, nil +} + +func toExtent(ctx context.Context, req Request, res Response) (Extent, error) { + anyResp, err := types.MarshalAny(res) + if err != nil { + return Extent{}, err + } + return Extent{ + Start: req.GetStart().UnixMilli(), + End: req.GetEnd().UnixMilli(), + Response: anyResp, + TraceId: jaegerTraceID(ctx), + }, nil +} + +// partition calculates the required requests to satisfy req given the cached data. +// extents must be in order by start time. 
+func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) {
+	var requests []Request
+	var cachedResponses []Response
+	start := req.GetStart().UnixMilli()
+	end := req.GetEnd().UnixMilli()
+
+	for _, extent := range extents {
+		// If there is no overlap, ignore this extent.
+		if extent.GetEnd() < start || extent.GetStart() > end {
+			continue
+		}
+
+		// If this extent is tiny and the request is not tiny, discard it: it is more efficient to do a few larger queries.
+		// Hopefully the not-so-tiny request can then turn the tiny extent into a not-so-tiny extent.
+
+		// However, if the step is large enough, the split_query_by_interval middleware may generate a query with the same start and end.
+		// For example, if the step size is more than 12h and the interval is 24h,
+		// the extent's start and end time would be the same, even if the time range covers several hours.
+		if (req.GetStart() != req.GetEnd()) && ((end - start) > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) {
+			continue
+		}
+
+		// If there is a bit missing at the front, make a request for that.
+		if start < extent.Start {
+			r := req.WithStartEndForCache(time.UnixMilli(start), time.UnixMilli(extent.Start))
+			requests = append(requests, r)
+		}
+		res, err := extent.toResponse()
+		if err != nil {
+			return nil, nil, err
+		}
+		// Extract the overlap from the cached extent.
+		cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
+		start = extent.End
+	}
+
+	// Lastly, make a request for any data missing at the end.
+	if start < req.GetEnd().UnixMilli() {
+		r := req.WithStartEndForCache(time.UnixMilli(start), time.UnixMilli(end))
+		requests = append(requests, r)
+	}
+
+	// If start and end are the same (valid in PromQL), the condition above leaves start == req.GetEnd() and we won't do the query.
+	// But we should only do the request if we don't have a valid cached response for it.
+	if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
+		requests = append(requests, req)
+	}
+
+	return requests, cachedResponses, nil
+}
+
+func (s ResultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) {
+	step := math.Max64(1, req.GetStep())
+	maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / step) * step
+	for i := range extents {
+		// Never cache data for the latest freshness period.
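+		// For example (hypothetical numbers): with a 10s step and a 1m
+		// maxCacheFreshness, maxCacheTime is now-1m rounded down to a
+		// multiple of the step; any extent reaching past that boundary is
+		// truncated to it and its cached response re-extracted below.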
+ if extents[i].End > maxCacheTime { + extents[i].End = maxCacheTime + res, err := extents[i].toResponse() + if err != nil { + return nil, err + } + extracted := s.extractor.Extract(extents[i].GetStart(), maxCacheTime, res, extents[i].GetStart(), extents[i].GetEnd()) + anyResp, err := types.MarshalAny(extracted) + if err != nil { + return nil, err + } + extents[i].Response = anyResp + } + } + return extents, nil +} + +func (s ResultsCache) get(ctx context.Context, key string) ([]Extent, bool) { + found, bufs, _, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)}) + if len(found) != 1 { + return nil, false + } + + var resp CachedResponse + sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck + defer sp.Finish() + log := spanlogger.FromContext(ctx) + defer log.Finish() + + log.LogFields(otlog.Int("bytes", len(bufs[0]))) + + if err := proto.Unmarshal(bufs[0], &resp); err != nil { + level.Error(log).Log("msg", "error unmarshalling cached value", "err", err) + log.Error(err) + return nil, false + } + + if resp.Key != key { + return nil, false + } + + // Refreshes the cache if it contains an old proto schema. + for _, e := range resp.Extents { + if e.Response == nil { + return nil, false + } + } + + return resp.Extents, true +} + +func (s ResultsCache) put(ctx context.Context, key string, extents []Extent) { + buf, err := proto.Marshal(&CachedResponse{ + Key: key, + Extents: extents, + }) + if err != nil { + level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err) + return + } + + _ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf}) +} + +func jaegerTraceID(ctx context.Context) string { + span := opentracing.SpanFromContext(ctx) + if span == nil { + return "" + } + + spanContext, ok := span.Context().(jaeger.SpanContext) + if !ok { + return "" + } + + return spanContext.TraceID().String() +} + +func (e *Extent) toResponse() (Response, error) { + msg, err := types.EmptyAny(e.Response) + if err != nil { + return nil, err + } + + if err := types.UnmarshalAny(e.Response, msg); err != nil { + return nil, err + } + + resp, ok := msg.(Response) + if !ok { + return nil, fmt.Errorf("bad cached type") + } + return resp, nil +} diff --git a/pkg/storage/chunk/cache/resultscache/cache_test.go b/pkg/storage/chunk/cache/resultscache/cache_test.go new file mode 100644 index 0000000000000..db6e9d6c8a4a1 --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/cache_test.go @@ -0,0 +1,605 @@ +package resultscache + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/gogo/protobuf/types" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/user" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + + "github.com/grafana/loki/pkg/logqlmodel/stats" + "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/util/constants" +) + +const day = 24 * time.Hour + +var ( + parsedRequest = &MockRequest{ + Start: time.UnixMilli(1536673680 * 1e3), + End: time.UnixMilli(1536716898 * 1e3), + Step: 120 * 1e3, + Query: "sum(container_memory_rss) by (namespace)", + } + + parsedResponse = &MockResponse{ + Labels: []*MockLabelsPair{ + {Name: "foo", Value: "bar"}, + }, + Samples: []*MockSample{ + {Value: 137, TimestampMs: 1536673680000}, + {Value: 137, TimestampMs: 1536673780000}, + }, + } +) + +func TestPartition(t *testing.T) { + for _, tc := range []struct { + name string + input Request + prevCachedResponse 
[]Extent
+		expectedRequests       []Request
+		expectedCachedResponse []Response
+	}{
+		{
+			name: "Test a complete hit.",
+			input: &MockRequest{
+				Start: time.UnixMilli(0),
+				End:   time.UnixMilli(100),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(0, 100),
+			},
+			expectedCachedResponse: []Response{
+				mkAPIResponse(0, 100, 10),
+			},
+		},
+
+		{
+			name: "Test with a complete miss.",
+			input: &MockRequest{
+				Start: time.UnixMilli(0),
+				End:   time.UnixMilli(100),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(110, 210),
+			},
+			expectedRequests: []Request{
+				&MockRequest{
+					Start: time.UnixMilli(0),
+					End:   time.UnixMilli(100),
+				},
+			},
+		},
+		{
+			name: "Test a partial hit.",
+			input: &MockRequest{
+				Start: time.UnixMilli(0),
+				End:   time.UnixMilli(100),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(50, 100),
+			},
+			expectedRequests: []Request{
+				&MockRequest{
+					Start: time.UnixMilli(0),
+					End:   time.UnixMilli(50),
+				},
+			},
+			expectedCachedResponse: []Response{
+				mkAPIResponse(50, 100, 10),
+			},
+		},
+		{
+			name: "Test multiple partial hits.",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(200),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(50, 120),
+				mkExtent(160, 250),
+			},
+			expectedRequests: []Request{
+				&MockRequest{
+					Start: time.UnixMilli(120),
+					End:   time.UnixMilli(160),
+				},
+			},
+			expectedCachedResponse: []Response{
+				mkAPIResponse(100, 120, 10),
+				mkAPIResponse(160, 200, 10),
+			},
+		},
+		{
+			name: "Partial hits with tiny gap.",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(160),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(50, 120),
+				mkExtent(122, 130),
+			},
+			expectedRequests: []Request{
+				&MockRequest{
+					Start: time.UnixMilli(120),
+					End:   time.UnixMilli(160),
+				},
+			},
+			expectedCachedResponse: []Response{
+				mkAPIResponse(100, 120, 10),
+			},
+		},
+		{
+			name: "Extent is outside the range and the request has a single step (same start and end).",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(100),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(50, 90),
+			},
+			expectedRequests: []Request{
+				&MockRequest{
+					Start: time.UnixMilli(100),
+					End:   time.UnixMilli(100),
+				},
+			},
+		},
+		{
+			name: "Test when hit has a large step and only a single sample extent.",
+			// If there is only a single sample in the split interval, start and end will be the same.
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(100),
+			},
+			prevCachedResponse: []Extent{
+				mkExtent(100, 100),
+			},
+			expectedCachedResponse: []Response{
+				mkAPIResponse(100, 105, 10),
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			s := ResultsCache{
+				extractor:      MockExtractor{},
+				minCacheExtent: 10,
+			}
+			reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
+			require.Nil(t, err)
+			require.Equal(t, tc.expectedRequests, reqs)
+			require.Equal(t, tc.expectedCachedResponse, resps)
+		})
+	}
+}
+
+func TestHandleHit(t *testing.T) {
+	for _, tc := range []struct {
+		name                       string
+		input                      Request
+		cachedEntry                []Extent
+		expectedUpdatedCachedEntry []Extent
+	}{
+		{
+			name: "Should drop tiny extent that overlaps with non-tiny request only",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(120),
+				Step:  5,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(0, 50, 5),
+				mkExtentWithStep(60, 65, 5),
+				mkExtentWithStep(100, 105, 5),
+				mkExtentWithStep(110, 150, 5),
+				mkExtentWithStep(160, 165, 5),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(0, 50, 5),
+				mkExtentWithStep(60, 65, 5),
+				mkExtentWithStep(100, 150, 5),
+				mkExtentWithStep(160, 165, 5),
+			},
+		},
+		{
+			name: "Should replace tiny extents that are covered by a bigger request",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(200),
+				Step:  5,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(0, 50, 5),
+				mkExtentWithStep(60, 65, 5),
+				mkExtentWithStep(100, 105, 5),
+				mkExtentWithStep(110, 115, 5),
+				mkExtentWithStep(120, 125, 5),
+				mkExtentWithStep(220, 225, 5),
+				mkExtentWithStep(240, 250, 5),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(0, 50, 5),
+				mkExtentWithStep(60, 65, 5),
+				mkExtentWithStep(100, 200, 5),
+				mkExtentWithStep(220, 225, 5),
+				mkExtentWithStep(240, 250, 5),
+			},
+		},
+		{
+			name: "Should not drop tiny extent that completely overlaps with tiny request",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(105),
+				Step:  5,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(0, 50, 5),
+				mkExtentWithStep(60, 65, 5),
+				mkExtentWithStep(100, 105, 5),
+				mkExtentWithStep(160, 165, 5),
+			},
+			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled from cache
+		},
+		{
+			name: "Should not drop tiny extent that partially center-overlaps with tiny request",
+			input: &MockRequest{
+				Start: time.UnixMilli(106),
+				End:   time.UnixMilli(108),
+				Step:  2,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(60, 64, 2),
+				mkExtentWithStep(104, 110, 2),
+				mkExtentWithStep(160, 166, 2),
+			},
+			expectedUpdatedCachedEntry: nil, // no cache update needed, request fulfilled from cache
+		},
+		{
+			name: "Should not drop tiny extent that partially left-overlaps with tiny request",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(106),
+				Step:  2,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(60, 64, 2),
+				mkExtentWithStep(104, 110, 2),
+				mkExtentWithStep(160, 166, 2),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(60, 64, 2),
+				mkExtentWithStep(100, 110, 2),
+				mkExtentWithStep(160, 166, 2),
+			},
+		},
+		{
+			name: "Should not drop tiny extent that partially right-overlaps with tiny request",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(106),
+				Step:  2,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(60, 64, 2),
+				mkExtentWithStep(98, 102, 2),
+				mkExtentWithStep(160, 166, 2),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(60, 64, 2),
+				mkExtentWithStep(98, 106, 2),
+				mkExtentWithStep(160, 166, 2),
+			},
+		},
+		{
+			name: "Should merge fragmented extents if request fills the hole",
+			input: &MockRequest{
+				Start: time.UnixMilli(40),
+				End:   time.UnixMilli(80),
+				Step:  20,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(0, 20, 20),
+				mkExtentWithStep(80, 100, 20),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(0, 100, 20),
+			},
+		},
+		{
+			name: "Should left-extend extent if request starts earlier than extent in cache",
+			input: &MockRequest{
+				Start: time.UnixMilli(40),
+				End:   time.UnixMilli(80),
+				Step:  20,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(60, 160, 20),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(40, 160, 20),
+			},
+		},
+		{
+			name: "Should right-extend extent if request ends later than extent in cache",
+			input: &MockRequest{
+				Start: time.UnixMilli(100),
+				End:   time.UnixMilli(180),
+				Step:  20,
+			},
+			cachedEntry: []Extent{
+				mkExtentWithStep(60, 160, 20),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(60, 180, 20),
+			},
+		},
+		{
+			name: "Should not throw an error if a completely-overlapped smaller Extent is erroneous",
+			input: &MockRequest{
+				// This request is carefully crafted such that cachedEntry is not used to fulfill
+				// the request.
+				Start: time.UnixMilli(160),
+				End:   time.UnixMilli(180),
+				Step:  20,
+			},
+			cachedEntry: []Extent{
+				{
+					Start: 60,
+					End:   80,
+
+					// If the optimization of "sorting by End when the Start of two Extents is equal" were not there, this nil
+					// response would cause an error during the Extents merge phase. With the optimization
+					// this bad Extent should be dropped. The good Extent below can be used instead.
+					Response: nil,
+				},
+				mkExtentWithStep(60, 160, 20),
+			},
+			expectedUpdatedCachedEntry: []Extent{
+				mkExtentWithStep(60, 180, 20),
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			sut := ResultsCache{
+				extractor:         MockExtractor{},
+				minCacheExtent:    10,
+				limits:            mockLimits{},
+				merger:            MockMerger{},
+				parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
+				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
+					return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
+				}),
+			}
+
+			ctx := user.InjectOrgID(context.Background(), "1")
+			response, updatedExtents, err := sut.handleHit(ctx, tc.input, tc.cachedEntry, 0)
+			require.NoError(t, err)
+
+			expectedResponse := mkAPIResponse(tc.input.GetStart().UnixMilli(), tc.input.GetEnd().UnixMilli(), tc.input.GetStep())
+			require.Equal(t, expectedResponse, response, "response does not match the expectation")
+			require.Equal(t, tc.expectedUpdatedCachedEntry, updatedExtents, "updated cache entry does not match the expectation")
+		})
+	}
+}
+
+func TestResultsCacheMaxFreshness(t *testing.T) {
+	modelNow := model.Now()
+	for i, tc := range []struct {
+		fakeLimits       Limits
+		Handler          HandlerFunc
+		expectedResponse *MockResponse
+	}{
+		{
+			fakeLimits:       mockLimits{maxCacheFreshness: 5 * time.Second},
+			Handler:          nil,
+			expectedResponse: mkAPIResponse(int64(modelNow)-(50*1e3), int64(modelNow)-(10*1e3), 10),
+		},
+		{
+			// should not lookup cache because per-tenant override will be applied
+			fakeLimits: mockLimits{maxCacheFreshness: 10 * time.Minute},
+			Handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
+				return parsedResponse, nil
+			}),
+			expectedResponse: parsedResponse,
+		},
+	} {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			var cfg Config
+
flagext.DefaultValues(&cfg) + cfg.CacheConfig.Cache = cache.NewMockCache() + c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) + require.NoError(t, err) + fakeLimits := tc.fakeLimits + rc := NewResultsCache( + log.NewNopLogger(), + c, + tc.Handler, + ConstSplitter(day), + fakeLimits, + MockMerger{}, + MockExtractor{}, + nil, + nil, + func(_ context.Context, tenantIDs []string, r Request) int { + return 10 + }, + nil, + false, + ) + require.NoError(t, err) + + // create cache with handler + ctx := user.InjectOrgID(context.Background(), "1") + + // create request with start end within the key extents + req := parsedRequest.WithStartEndForCache(time.UnixMilli(int64(modelNow)-(50*1e3)), time.UnixMilli(int64(modelNow)-(10*1e3))) + + // fill cache + key := ConstSplitter(day).GenerateCacheKey(context.Background(), "1", req) + rc.put(ctx, key, []Extent{mkExtent(int64(modelNow)-(600*1e3), int64(modelNow))}) + + resp, err := rc.Do(ctx, req) + require.NoError(t, err) + require.Equal(t, tc.expectedResponse, resp) + }) + } +} + +func Test_resultsCache_MissingData(t *testing.T) { + cfg := Config{ + CacheConfig: cache.Config{ + Cache: cache.NewMockCache(), + }, + } + c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) + require.NoError(t, err) + rc := NewResultsCache( + log.NewNopLogger(), + c, + nil, + ConstSplitter(day), + mockLimits{}, + MockMerger{}, + MockExtractor{}, + nil, + nil, + func(_ context.Context, tenantIDs []string, r Request) int { + return 10 + }, + nil, + false, + ) + require.NoError(t, err) + ctx := context.Background() + + // fill up the cache + rc.put(ctx, "empty", []Extent{{ + Start: 100, + End: 200, + Response: nil, + }}) + rc.put(ctx, "notempty", []Extent{mkExtent(100, 120)}) + rc.put(ctx, "mixed", []Extent{mkExtent(100, 120), { + Start: 120, + End: 200, + Response: nil, + }}) + + extents, hit := rc.get(ctx, "empty") + require.Empty(t, extents) + require.False(t, hit) + + extents, hit = rc.get(ctx, "notempty") + require.Equal(t, len(extents), 1) + require.True(t, hit) + + extents, hit = rc.get(ctx, "mixed") + require.Equal(t, len(extents), 0) + require.False(t, hit) +} + +func mkAPIResponse(start, end, step int64) *MockResponse { + var samples []*MockSample + for i := start; i <= end; i += step { + samples = append(samples, &MockSample{ + TimestampMs: i, + Value: float64(i), + }) + } + + return &MockResponse{ + Labels: []*MockLabelsPair{ + {Name: "foo", Value: "bar"}, + }, + Samples: samples, + } +} + +func mkExtent(start, end int64) Extent { + return mkExtentWithStep(start, end, 10) +} + +func mkExtentWithStep(start, end, step int64) Extent { + res := mkAPIResponse(start, end, step) + anyRes, err := types.MarshalAny(res) + if err != nil { + panic(err) + } + return Extent{ + Start: start, + End: end, + Response: anyRes, + } +} + +func (r *MockRequest) WithStartEndForCache(start time.Time, end time.Time) Request { + m := *r + m.Start = start + m.End = end + return &m +} + +type MockMerger struct{} + +func (m MockMerger) MergeResponse(responses ...Response) (Response, error) { + samples := make([]*MockSample, 0, len(responses)*2) + for _, response := range responses { + samples = append(samples, response.(*MockResponse).Samples...) + } + + // Merge samples by: + // 1. Sorting them by time. + // 2. Removing duplicates. 
+	slices.SortFunc(samples, func(a, b *MockSample) int {
+		if a.TimestampMs == b.TimestampMs {
+			return 0
+		}
+		if a.TimestampMs < b.TimestampMs {
+			return -1
+		}
+		return 1
+	})
+	samples = slices.CompactFunc(samples, func(a, b *MockSample) bool {
+		return a.TimestampMs == b.TimestampMs
+	})
+
+	return &MockResponse{
+		Labels:  responses[0].(*MockResponse).Labels,
+		Samples: samples,
+	}, nil
+}
+
+type MockExtractor struct{}
+
+func (m MockExtractor) Extract(start, end int64, res Response, _, _ int64) Response {
+	mockRes := res.(*MockResponse)
+
+	result := MockResponse{
+		Labels:  mockRes.Labels,
+		Samples: make([]*MockSample, 0, len(mockRes.Samples)),
+	}
+
+	for _, sample := range mockRes.Samples {
+		if start <= sample.TimestampMs && sample.TimestampMs <= end {
+			result.Samples = append(result.Samples, sample)
+		}
+	}
+	return &result
+}
+
+type mockLimits struct {
+	maxCacheFreshness time.Duration
+}
+
+func (m mockLimits) MaxCacheFreshness(context.Context, string) time.Duration {
+	return m.maxCacheFreshness
+}
diff --git a/pkg/storage/chunk/cache/resultscache/config.go b/pkg/storage/chunk/cache/resultscache/config.go
new file mode 100644
index 0000000000000..5a329168e8372
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/config.go
@@ -0,0 +1,45 @@
+package resultscache
+
+import (
+	"context"
+	"flag"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/grafana/loki/pkg/storage/chunk/cache"
+)
+
+// Config is the config for the results cache.
+type Config struct {
+	CacheConfig cache.Config `yaml:"cache"`
+	Compression string       `yaml:"compression"`
+}
+
+func (cfg *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
+	cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f)
+	f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.")
+}
+
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	cfg.RegisterFlagsWithPrefix(f, "")
+}
+
+func (cfg *Config) Validate() error {
+	switch cfg.Compression {
+	case "snappy", "":
+		// valid
+	default:
+		return errors.Errorf("unsupported compression type: %s", cfg.Compression)
+	}
+
+	if !cache.IsCacheConfigured(cfg.CacheConfig) {
+		return errors.New("no cache configured")
+	}
+
+	return nil
+}
+
+type Limits interface {
+	MaxCacheFreshness(ctx context.Context, tenantID string) time.Duration
+}
diff --git a/pkg/storage/chunk/cache/resultscache/interface.go b/pkg/storage/chunk/cache/resultscache/interface.go
new file mode 100644
index 0000000000000..7d359c9628583
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/interface.go
@@ -0,0 +1,56 @@
+package resultscache
+
+import (
+	"context"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+type Request interface {
+	proto.Message
+	// GetStart returns the start timestamp of the request.
+	GetStart() time.Time
+	// GetEnd returns the end timestamp of the request.
+	GetEnd() time.Time
+	// GetStep returns the step of the request in milliseconds.
+	GetStep() int64
+	// GetQuery returns the query of the request.
+	GetQuery() string
+	// GetCachingOptions returns the caching options.
+	GetCachingOptions() CachingOptions
+	// WithStartEndForCache clones the current request with different start and end timestamps.
+	WithStartEndForCache(start time.Time, end time.Time) Request
+}
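+
+// For illustration: besides the getters, the only behavior an implementation
+// must supply is a shallow clone with adjusted bounds. MockRequest in
+// cache_test.go, for example, implements it as:
+//
+//	func (r *MockRequest) WithStartEndForCache(start time.Time, end time.Time) Request {
+//		m := *r
+//		m.Start = start
+//		m.End = end
+//		return &m
+//	}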
+
+type Response interface {
+	proto.Message
+}
+
+// ResponseMerger is used by middlewares making multiple requests to merge back all responses into a single one.
+type ResponseMerger interface {
+	// MergeResponse merges responses from multiple requests into a single Response.
+	MergeResponse(...Response) (Response, error)
+}
+
+type Handler interface {
+	Do(ctx context.Context, req Request) (Response, error)
+}
+
+// Extractor is used by the cache to extract a subset of a response from a cache entry.
+type Extractor interface {
+	// Extract extracts the portion of `res` that falls between the `start` and `end`
+	// timestamps in milliseconds; the `res` response itself spans from `resStart` to `resEnd`.
+	Extract(start, end int64, res Response, resStart, resEnd int64) Response
+}
+
+// KeyGenerator generates cache keys. This is a useful interface for downstream
+// consumers who wish to implement their own strategies.
+type KeyGenerator interface {
+	GenerateCacheKey(ctx context.Context, userID string, r Request) string
+}
+
+type CacheGenNumberLoader interface {
+	GetResultsCacheGenNumber(tenantIDs []string) string
+	Stop()
+}
diff --git a/pkg/storage/chunk/cache/resultscache/test_types.pb.go b/pkg/storage/chunk/cache/resultscache/test_types.pb.go
new file mode 100644
index 0000000000000..7d3a54864e3df
--- /dev/null
+++ b/pkg/storage/chunk/cache/resultscache/test_types.pb.go
@@ -0,0 +1,1520 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: pkg/storage/chunk/cache/resultscache/test_types.proto
+
+package resultscache
+
+import (
+	encoding_binary "encoding/binary"
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	_ "github.com/gogo/protobuf/types"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MockRequest struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` + Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` + Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` + CachingOptions CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"` +} + +func (m *MockRequest) Reset() { *m = MockRequest{} } +func (*MockRequest) ProtoMessage() {} +func (*MockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5b2c489557407809, []int{0} +} +func (m *MockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MockRequest.Merge(m, src) +} +func (m *MockRequest) XXX_Size() int { + return m.Size() +} +func (m *MockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MockRequest proto.InternalMessageInfo + +func (m *MockRequest) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *MockRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *MockRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +func (m *MockRequest) GetStep() int64 { + if m != nil { + return m.Step + } + return 0 +} + +func (m *MockRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *MockRequest) GetCachingOptions() CachingOptions { + if m != nil { + return m.CachingOptions + } + return CachingOptions{} +} + +type MockResponse struct { + Labels []*MockLabelsPair `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + Samples []*MockSample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples,omitempty"` +} + +func (m *MockResponse) Reset() { *m = MockResponse{} } +func (*MockResponse) ProtoMessage() {} +func (*MockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5b2c489557407809, []int{1} +} +func (m *MockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MockResponse.Merge(m, src) +} +func (m *MockResponse) XXX_Size() int { + return m.Size() +} +func (m *MockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MockResponse proto.InternalMessageInfo + +func (m *MockResponse) GetLabels() []*MockLabelsPair { + if m != nil { + return m.Labels + } + return nil +} + +func (m *MockResponse) GetSamples() []*MockSample { + if m != nil { + return m.Samples + } + return nil +} + +type MockLabelsPair struct { + 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *MockLabelsPair) Reset() { *m = MockLabelsPair{} } +func (*MockLabelsPair) ProtoMessage() {} +func (*MockLabelsPair) Descriptor() ([]byte, []int) { + return fileDescriptor_5b2c489557407809, []int{2} +} +func (m *MockLabelsPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MockLabelsPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MockLabelsPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MockLabelsPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_MockLabelsPair.Merge(m, src) +} +func (m *MockLabelsPair) XXX_Size() int { + return m.Size() +} +func (m *MockLabelsPair) XXX_DiscardUnknown() { + xxx_messageInfo_MockLabelsPair.DiscardUnknown(m) +} + +var xxx_messageInfo_MockLabelsPair proto.InternalMessageInfo + +func (m *MockLabelsPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MockLabelsPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type MockSample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` +} + +func (m *MockSample) Reset() { *m = MockSample{} } +func (*MockSample) ProtoMessage() {} +func (*MockSample) Descriptor() ([]byte, []int) { + return fileDescriptor_5b2c489557407809, []int{3} +} +func (m *MockSample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MockSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MockSample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MockSample) XXX_Merge(src proto.Message) { + xxx_messageInfo_MockSample.Merge(m, src) +} +func (m *MockSample) XXX_Size() int { + return m.Size() +} +func (m *MockSample) XXX_DiscardUnknown() { + xxx_messageInfo_MockSample.DiscardUnknown(m) +} + +var xxx_messageInfo_MockSample proto.InternalMessageInfo + +func (m *MockSample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *MockSample) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +func init() { + proto.RegisterType((*MockRequest)(nil), "resultscache.MockRequest") + proto.RegisterType((*MockResponse)(nil), "resultscache.MockResponse") + proto.RegisterType((*MockLabelsPair)(nil), "resultscache.MockLabelsPair") + proto.RegisterType((*MockSample)(nil), "resultscache.MockSample") +} + +func init() { + proto.RegisterFile("pkg/storage/chunk/cache/resultscache/test_types.proto", fileDescriptor_5b2c489557407809) +} + +var fileDescriptor_5b2c489557407809 = []byte{ + // 462 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x3f, 0x6f, 0x13, 0x31, + 0x14, 0x3f, 0xe7, 0xd2, 0x94, 0x3a, 0x51, 0x07, 0xab, 0xc3, 0x29, 0x42, 0x4e, 0xc8, 0x94, 0xe9, + 0x2c, 0x95, 0x3f, 0x43, 0xc5, 0x14, 0xc4, 0x82, 0xa8, 0x40, 0x86, 0x89, 0xa5, 0x72, 0x0e, 0xd7, + 0x39, 0xe5, 0xee, 0xec, 0xde, 0xf3, 0x21, 0xba, 0xb1, 0xb3, 0xf4, 0x63, 0xf0, 0x51, 0x3a, 
0x66, + 0xec, 0x04, 0xe4, 0xb2, 0x30, 0xf6, 0x23, 0x20, 0xfb, 0x92, 0x36, 0xa5, 0x0b, 0xdd, 0xde, 0xf3, + 0xfb, 0xfd, 0xb1, 0x7e, 0xef, 0xe1, 0xe7, 0x66, 0xae, 0x18, 0x58, 0x5d, 0x0a, 0x25, 0x59, 0x32, + 0xab, 0x8a, 0x39, 0x4b, 0x44, 0x32, 0x93, 0xac, 0x94, 0x50, 0x65, 0x16, 0x9a, 0xc6, 0x4a, 0xb0, + 0x27, 0xf6, 0xdc, 0x48, 0x88, 0x4d, 0xa9, 0xad, 0x26, 0xbd, 0xed, 0x71, 0xff, 0x40, 0x69, 0xa5, + 0xfd, 0x80, 0xb9, 0xaa, 0xc1, 0xf4, 0x07, 0x4a, 0x6b, 0x95, 0x49, 0xe6, 0xbb, 0x69, 0x75, 0xca, + 0x6c, 0x9a, 0x4b, 0xb0, 0x22, 0x37, 0x6b, 0x40, 0x77, 0x4b, 0x71, 0xf4, 0xbd, 0x85, 0xbb, 0xc7, + 0x3a, 0x99, 0x73, 0x79, 0x56, 0x49, 0xb0, 0x84, 0xe0, 0xb6, 0x11, 0x76, 0x16, 0xa1, 0x21, 0x1a, + 0xef, 0x71, 0x5f, 0x93, 0x23, 0xbc, 0x03, 0x56, 0x94, 0x36, 0x6a, 0x0d, 0xd1, 0xb8, 0x7b, 0xd8, + 0x8f, 0x1b, 0x87, 0x78, 0xe3, 0x10, 0x7f, 0xdc, 0x38, 0x4c, 0x1e, 0x5d, 0xfe, 0x1c, 0x04, 0x17, + 0xbf, 0x06, 0x88, 0x37, 0x14, 0xf2, 0x02, 0x87, 0xb2, 0xf8, 0x1c, 0x85, 0x0f, 0x60, 0x3a, 0x82, + 0xfb, 0x07, 0x58, 0x69, 0xa2, 0xf6, 0x10, 0x8d, 0x43, 0xee, 0x6b, 0x72, 0x80, 0x77, 0xce, 0x2a, + 0x59, 0x9e, 0x47, 0x1d, 0xff, 0xb9, 0xa6, 0x21, 0x6f, 0xf0, 0xbe, 0x8b, 0x23, 0x2d, 0xd4, 0x3b, + 0x63, 0x53, 0x5d, 0x40, 0xb4, 0xeb, 0xcd, 0x1e, 0xc7, 0xdb, 0x61, 0xc5, 0xaf, 0xee, 0x60, 0x26, + 0x6d, 0x67, 0xc7, 0xff, 0x61, 0x8e, 0xbe, 0xe2, 0x5e, 0x13, 0x06, 0x18, 0x5d, 0x80, 0x24, 0xcf, + 0x70, 0x27, 0x13, 0x53, 0x99, 0x41, 0x84, 0x86, 0xe1, 0x7d, 0x4d, 0x87, 0x7d, 0xeb, 0xe7, 0xef, + 0x45, 0x5a, 0xf2, 0x35, 0x96, 0x1c, 0xe2, 0x5d, 0x10, 0xb9, 0xc9, 0x24, 0x44, 0x2d, 0x4f, 0x8b, + 0xee, 0xd3, 0x3e, 0x78, 0x00, 0xdf, 0x00, 0x47, 0x47, 0x78, 0xff, 0xae, 0x9a, 0x4b, 0xa0, 0x10, + 0xb9, 0xdc, 0x6c, 0xc2, 0xd5, 0x2e, 0x81, 0x2f, 0x22, 0xab, 0xa4, 0xdf, 0xc4, 0x1e, 0x6f, 0x9a, + 0xd1, 0x6b, 0x8c, 0x6f, 0x25, 0x6f, 0x31, 0x8e, 0x88, 0xd6, 0x18, 0xf2, 0x04, 0xf7, 0x6e, 0xee, + 0xe0, 0x24, 0x07, 0x2f, 0x10, 0xf2, 0xee, 0xcd, 0xdb, 0x31, 0x4c, 0xca, 0xc5, 0x92, 0x06, 0x57, + 0x4b, 0x1a, 0x5c, 0x2f, 0x29, 0xfa, 0x56, 0x53, 0xf4, 0xa3, 0xa6, 0xe8, 0xb2, 0xa6, 0x68, 0x51, + 0x53, 0xf4, 0xbb, 0xa6, 0xe8, 0x4f, 0x4d, 0x83, 0xeb, 0x9a, 0xa2, 0x8b, 0x15, 0x0d, 0x16, 0x2b, + 0x1a, 0x5c, 0xad, 0x68, 0xf0, 0xe9, 0xa5, 0x4a, 0xed, 0xac, 0x9a, 0xc6, 0x89, 0xce, 0x99, 0x2a, + 0xc5, 0xa9, 0x28, 0x04, 0xcb, 0xf4, 0x3c, 0x65, 0xff, 0x73, 0xe1, 0xd3, 0x8e, 0xbf, 0x84, 0xa7, + 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x20, 0x73, 0x6a, 0xfb, 0x10, 0x03, 0x00, 0x00, +} + +func (this *MockRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MockRequest) + if !ok { + that2, ok := that.(MockRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + if !this.End.Equal(that1.End) { + return false + } + if this.Step != that1.Step { + return false + } + if this.Query != that1.Query { + return false + } + if !this.CachingOptions.Equal(&that1.CachingOptions) { + return false + } + return true +} +func (this *MockResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MockResponse) + if !ok { + that2, ok := that.(MockResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range 
this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(that1.Samples[i]) { + return false + } + } + return true +} +func (this *MockLabelsPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MockLabelsPair) + if !ok { + that2, ok := that.(MockLabelsPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *MockSample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MockSample) + if !ok { + that2, ok := that.(MockSample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if this.TimestampMs != that1.TimestampMs { + return false + } + return true +} +func (this *MockRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&resultscache.MockRequest{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "Step: "+fmt.Sprintf("%#v", this.Step)+",\n") + s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") + s = append(s, "CachingOptions: "+strings.Replace(this.CachingOptions.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MockResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&resultscache.MockResponse{") + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + if this.Samples != nil { + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MockLabelsPair) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&resultscache.MockLabelsPair{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MockSample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&resultscache.MockSample{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTestTypes(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *MockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MockRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CachingOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTestTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x32 + } + if m.Step != 0 { + i = encodeVarintTestTypes(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x20 + } + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintTestTypes(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintTestTypes(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x12 + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTestTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTestTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MockLabelsPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MockLabelsPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MockLabelsPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTestTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MockSample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MockSample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MockSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + 
i := len(dAtA) + _ = i + var l int + _ = l + if m.TimestampMs != 0 { + i = encodeVarintTestTypes(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func encodeVarintTestTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTestTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTestTypes(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovTestTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovTestTypes(uint64(l)) + if m.Step != 0 { + n += 1 + sovTestTypes(uint64(m.Step)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sovTestTypes(uint64(l)) + } + l = m.CachingOptions.Size() + n += 1 + l + sovTestTypes(uint64(l)) + return n +} + +func (m *MockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTestTypes(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTestTypes(uint64(l)) + } + } + return n +} + +func (m *MockLabelsPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTestTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTestTypes(uint64(l)) + } + return n +} + +func (m *MockSample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.TimestampMs != 0 { + n += 1 + sovTestTypes(uint64(m.TimestampMs)) + } + return n +} + +func sovTestTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTestTypes(x uint64) (n int) { + return sovTestTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MockRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MockRequest{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Step:` + fmt.Sprintf("%v", this.Step) + `,`, + `Query:` + fmt.Sprintf("%v", this.Query) + `,`, + `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "CachingOptions", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MockResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForLabels := "[]*MockLabelsPair{" + for _, f := range this.Labels { + repeatedStringForLabels += strings.Replace(f.String(), "MockLabelsPair", "MockLabelsPair", 1) + "," + } + repeatedStringForLabels += "}" + repeatedStringForSamples := "[]*MockSample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(f.String(), "MockSample", "MockSample", 1) + "," + } + repeatedStringForSamples += "}" + s := strings.Join([]string{`&MockResponse{`, + `Labels:` + repeatedStringForLabels + `,`, + `Samples:` + repeatedStringForSamples + `,`, + `}`, 
+ }, "") + return s +} +func (this *MockLabelsPair) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MockLabelsPair{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *MockSample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MockSample{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `}`, + }, "") + return s +} +func valueToStringTestTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CachingOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTestTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, &MockLabelsPair{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &MockSample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTestTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MockLabelsPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MockLabelsPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MockLabelsPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTestTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTestTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTestTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} +func (m *MockSample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MockSample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MockSample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTestTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTestTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTestTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTestTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTestTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTestTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTestTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTestTypes + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTestTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTestTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTestTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTestTypes + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + 
default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTestTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTestTypes = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/storage/chunk/cache/resultscache/test_types.proto b/pkg/storage/chunk/cache/resultscache/test_types.proto new file mode 100644 index 0000000000000..920db66314de4 --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/test_types.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package resultscache; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "types.proto"; + +option go_package = "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message MockRequest { + string path = 1; + google.protobuf.Timestamp start = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; + google.protobuf.Timestamp end = 3 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; + int64 step = 4; + string query = 6; + CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; +} + +message MockResponse { + repeated MockLabelsPair labels = 1; + repeated MockSample samples = 2; +} + +message MockLabelsPair { + string name = 1; + string value = 2; +} + +message MockSample { + double value = 1; + int64 timestamp_ms = 2; +} diff --git a/pkg/storage/chunk/cache/resultscache/types.pb.go b/pkg/storage/chunk/cache/resultscache/types.pb.go new file mode 100644 index 0000000000000..7c63abdda4bf6 --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/types.pb.go @@ -0,0 +1,1078 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/storage/chunk/cache/resultscache/types.proto + +package resultscache + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
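The hand-rolled decode loops in these generated files all instantiate the same protobuf base-128 varint pattern: each byte contributes its low 7 bits, least-significant group first, and a clear high bit marks the final byte. A self-contained Go sketch of that loop (the `decodeVarint` helper name is ours, for illustration only; it is not part of the generated API):

```go
package main

import (
	"fmt"
	"io"
)

// decodeVarint mirrors the loop emitted by protoc-gen-gogo: accumulate
// 7 bits per byte into v, shifting left by 7 each iteration, and stop
// when a byte's high bit is clear. The shift >= 64 guard rejects
// varints that would overflow uint64, just like ErrIntOverflowTestTypes.
func decodeVarint(buf []byte, idx int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows 64 bits")
		}
		if idx >= len(buf) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := buf[idx]
		idx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, idx, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, _ := decodeVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, n) // 300 2
}
```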
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Defined here to prevent circular imports between logproto & queryrangebase +type CachingOptions struct { + Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *CachingOptions) Reset() { *m = CachingOptions{} } +func (*CachingOptions) ProtoMessage() {} +func (*CachingOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_6b13efd4ce8649ef, []int{0} +} +func (m *CachingOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CachingOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachingOptions.Merge(m, src) +} +func (m *CachingOptions) XXX_Size() int { + return m.Size() +} +func (m *CachingOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CachingOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_CachingOptions proto.InternalMessageInfo + +func (m *CachingOptions) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +type CachedResponse struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` + // List of cached responses; non-overlapping and in order. + Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"` +} + +func (m *CachedResponse) Reset() { *m = CachedResponse{} } +func (*CachedResponse) ProtoMessage() {} +func (*CachedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b13efd4ce8649ef, []int{1} +} +func (m *CachedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CachedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachedResponse.Merge(m, src) +} +func (m *CachedResponse) XXX_Size() int { + return m.Size() +} +func (m *CachedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CachedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CachedResponse proto.InternalMessageInfo + +func (m *CachedResponse) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *CachedResponse) GetExtents() []Extent { + if m != nil { + return m.Extents + } + return nil +} + +type Extent struct { + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` + TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"` + Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"` +} + +func (m *Extent) Reset() { *m = Extent{} } +func (*Extent) ProtoMessage() {} +func (*Extent) Descriptor() ([]byte, []int) { + return fileDescriptor_6b13efd4ce8649ef, []int{2} +} +func (m *Extent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Extent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil 
{ + return nil, err + } + return b[:n], nil + } +} +func (m *Extent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Extent.Merge(m, src) +} +func (m *Extent) XXX_Size() int { + return m.Size() +} +func (m *Extent) XXX_DiscardUnknown() { + xxx_messageInfo_Extent.DiscardUnknown(m) +} + +var xxx_messageInfo_Extent proto.InternalMessageInfo + +func (m *Extent) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Extent) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *Extent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *Extent) GetResponse() *types.Any { + if m != nil { + return m.Response + } + return nil +} + +func init() { + proto.RegisterType((*CachingOptions)(nil), "resultscache.CachingOptions") + proto.RegisterType((*CachedResponse)(nil), "resultscache.CachedResponse") + proto.RegisterType((*Extent)(nil), "resultscache.Extent") +} + +func init() { + proto.RegisterFile("pkg/storage/chunk/cache/resultscache/types.proto", fileDescriptor_6b13efd4ce8649ef) +} + +var fileDescriptor_6b13efd4ce8649ef = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x6e, 0xd4, 0x30, + 0x18, 0xc7, 0xe3, 0xde, 0x5d, 0x2f, 0x35, 0x15, 0xa0, 0xa8, 0x43, 0x7a, 0x83, 0x73, 0xba, 0xa9, + 0x03, 0xc4, 0x08, 0x56, 0x04, 0x22, 0x88, 0x01, 0x16, 0x24, 0x8f, 0x2c, 0xc8, 0x49, 0x5c, 0x27, + 0x4a, 0xb0, 0x23, 0xdb, 0x91, 0xc8, 0xc6, 0x23, 0xf0, 0x18, 0x0c, 0x3c, 0x48, 0xc7, 0x1b, 0x3b, + 0x45, 0x5c, 0x6e, 0x41, 0x99, 0xfa, 0x08, 0x28, 0x36, 0x77, 0xea, 0xd8, 0xe5, 0xf3, 0xf7, 0xff, + 0xfe, 0x7f, 0xc9, 0x3f, 0x7f, 0x32, 0x7c, 0xd1, 0x54, 0x1c, 0x6b, 0x23, 0x15, 0xe5, 0x0c, 0x67, + 0x45, 0x2b, 0x2a, 0x9c, 0xd1, 0xac, 0x60, 0x58, 0x31, 0xdd, 0xd6, 0x46, 0x3b, 0x61, 0xba, 0x86, + 0xe9, 0xb8, 0x51, 0xd2, 0xc8, 0xe0, 0xfc, 0xbe, 0xb3, 0xba, 0xe0, 0x92, 0x4b, 0x6b, 0xe0, 0xa9, + 0x73, 0x99, 0xd5, 0x25, 0x97, 0x92, 0xd7, 0x0c, 0x5b, 0x95, 0xb6, 0xd7, 0x98, 0x8a, 0xce, 0x59, + 0x9b, 0x67, 0xf0, 0xf1, 0x7b, 0x9a, 0x15, 0xa5, 0xe0, 0x9f, 0x1b, 0x53, 0x4a, 0xa1, 0x83, 0x15, + 0xf4, 0xf3, 0x52, 0xd3, 0xb4, 0x66, 0x79, 0x08, 0xd6, 0xe0, 0xca, 0x27, 0x47, 0xbd, 0xa9, 0x5d, + 0x9a, 0xe5, 0x84, 0xe9, 0x46, 0x0a, 0xcd, 0x82, 0x4b, 0x38, 0xab, 0x58, 0x67, 0x83, 0x67, 0xc9, + 0x72, 0xec, 0xa3, 0x49, 0x92, 0xa9, 0x04, 0x6f, 0xe1, 0x92, 0x7d, 0x37, 0x4c, 0x18, 0x1d, 0x9e, + 0xac, 0x67, 0x57, 0x8f, 0x5e, 0x5e, 0xc4, 0xf7, 0x59, 0xe3, 0x0f, 0xd6, 0x4c, 0x9e, 0xdc, 0xf4, + 0x91, 0x37, 0xf6, 0xd1, 0x21, 0x4c, 0x0e, 0xcd, 0xe6, 0x37, 0x80, 0xa7, 0x2e, 0x14, 0x44, 0x70, + 0xa1, 0x0d, 0x55, 0xc6, 0x5e, 0x34, 0x4b, 0xce, 0xc6, 0x3e, 0x72, 0x03, 0xe2, 0x8e, 0x89, 0x83, + 0x89, 0x3c, 0x3c, 0xb1, 0xb6, 0xe5, 0x60, 0x22, 0x27, 0x53, 0x09, 0xd6, 0xd0, 0x37, 0x8a, 0x66, + 0xec, 0x6b, 0x99, 0x87, 0x73, 0xcb, 0xb9, 0x18, 0xfb, 0x08, 0x3c, 0x27, 0x4b, 0x3b, 0xfe, 0x98, + 0x07, 0x6f, 0xa0, 0xaf, 0xfe, 0x3f, 0x28, 0x5c, 0xac, 0x81, 0x45, 0x75, 0x2b, 0x8b, 0x0f, 0x2b, + 0x8b, 0xdf, 0x89, 0x2e, 0x39, 0x1f, 0xfb, 0xe8, 0x98, 0x24, 0xc7, 0xee, 0xd3, 0xdc, 0x9f, 0x3d, + 0x9d, 0x27, 0x6a, 0xbb, 0x43, 0xde, 0xed, 0x0e, 0x79, 0x77, 0x3b, 0x04, 0x7e, 0x0c, 0x08, 0xfc, + 0x1a, 0x10, 0xb8, 0x19, 0x10, 0xd8, 0x0e, 0x08, 0xfc, 0x19, 0x10, 0xf8, 0x3b, 0x20, 0xef, 0x6e, + 0x40, 0xe0, 0xe7, 0x1e, 0x79, 0xdb, 0x3d, 0xf2, 0x6e, 0xf7, 0xc8, 0xfb, 0xf2, 0x9a, 0x97, 0xa6, + 0x68, 0xd3, 0x38, 0x93, 0xdf, 0x30, 0x57, 0xf4, 0x9a, 0x0a, 0x8a, 0x6b, 0x59, 0x95, 0xf8, 0x21, + 0x3f, 0x21, 0x3d, 0xb5, 0x7c, 
0xaf, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xc6, 0x6f, 0x43, + 0x38, 0x02, 0x00, 0x00, +} + +func (this *CachingOptions) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CachingOptions) + if !ok { + that2, ok := that.(CachingOptions) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Disabled != that1.Disabled { + return false + } + return true +} +func (this *CachedResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CachedResponse) + if !ok { + that2, ok := that.(CachedResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Key != that1.Key { + return false + } + if len(this.Extents) != len(that1.Extents) { + return false + } + for i := range this.Extents { + if !this.Extents[i].Equal(&that1.Extents[i]) { + return false + } + } + return true +} +func (this *Extent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Extent) + if !ok { + that2, ok := that.(Extent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Start != that1.Start { + return false + } + if this.End != that1.End { + return false + } + if this.TraceId != that1.TraceId { + return false + } + if !this.Response.Equal(that1.Response) { + return false + } + return true +} +func (this *CachingOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&resultscache.CachingOptions{") + s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CachedResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&resultscache.CachedResponse{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + if this.Extents != nil { + vs := make([]*Extent, len(this.Extents)) + for i := range vs { + vs[i] = &this.Extents[i] + } + s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Extent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&resultscache.Extent{") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + if this.Response != nil { + s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTypes(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CachingOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CachedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extents) > 0 { + for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Extent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x22 + } + if m.End != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CachingOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Disabled { + n += 2 + } + return n +} + +func (m *CachedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Extents) > 0 { + for _, e := range m.Extents { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Extent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sovTypes(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovTypes(uint64(m.End)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CachingOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CachingOptions{`, + 
`Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, + `}`, + }, "") + return s +} +func (this *CachedResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForExtents := "[]Extent{" + for _, f := range this.Extents { + repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + "," + } + repeatedStringForExtents += "}" + s := strings.Join([]string{`&CachedResponse{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Extents:` + repeatedStringForExtents + `,`, + `}`, + }, "") + return s +} +func (this *Extent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Extent{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CachingOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CachedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extents = append(m.Extents, Extent{}) + if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Extent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Extent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &types.Any{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/storage/chunk/cache/resultscache/types.proto b/pkg/storage/chunk/cache/resultscache/types.proto new file mode 100644 index 0000000000000..835950a0581e7 --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/types.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package resultscache; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +option go_package = 
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Defined here to prevent circular imports between logproto & queryrangebase +message CachingOptions { + bool disabled = 1; +} + +message CachedResponse { + string key = 1 [(gogoproto.jsontag) = "key"]; + + // List of cached responses; non-overlapping and in order. + repeated Extent extents = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "extents" + ]; +} + +message Extent { + int64 start = 1 [(gogoproto.jsontag) = "start"]; + int64 end = 2 [(gogoproto.jsontag) = "end"]; + // reserved the previous key to ensure cache transition + reserved 3; + string trace_id = 4 [(gogoproto.jsontag) = "-"]; + google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"]; +} diff --git a/pkg/storage/chunk/cache/resultscache/util.go b/pkg/storage/chunk/cache/resultscache/util.go new file mode 100644 index 0000000000000..eedc14a1f0b7f --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/util.go @@ -0,0 +1,67 @@ +package resultscache + +import ( + "context" +) + +type HandlerFunc func(context.Context, Request) (Response, error) + +// Do implements Handler. +func (q HandlerFunc) Do(ctx context.Context, req Request) (Response, error) { + return q(ctx, req) +} + +// RequestResponse contains a request response and the respective request that was used. +type RequestResponse struct { + Request Request + Response Response +} + +// DoRequests executes a list of requests in parallel. +func DoRequests(ctx context.Context, downstream Handler, reqs []Request, parallelism int) ([]RequestResponse, error) { + // If one of the requests fail, we want to be able to cancel the rest of them. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Feed all requests to a bounded intermediate channel to limit parallelism. 
+ intermediate := make(chan Request) + go func() { + for _, req := range reqs { + intermediate <- req + } + close(intermediate) + }() + + respChan, errChan := make(chan RequestResponse), make(chan error) + if parallelism > len(reqs) { + parallelism = len(reqs) + } + for i := 0; i < parallelism; i++ { + go func() { + for req := range intermediate { + resp, err := downstream.Do(ctx, req) + if err != nil { + errChan <- err + } else { + respChan <- RequestResponse{req, resp} + } + } + }() + } + + resps := make([]RequestResponse, 0, len(reqs)) + var firstErr error + for range reqs { + select { + case resp := <-respChan: + resps = append(resps, resp) + case err := <-errChan: + if firstErr == nil { + cancel() + firstErr = err + } + } + } + + return resps, firstErr +} diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go index d21513f1150b9..0c2136801f812 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client.go @@ -405,6 +405,7 @@ func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object func (a *S3ObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { var storageObjects []client.StorageObject var commonPrefixes []client.StorageCommonPrefix + var commonPrefixesSet = make(map[string]bool) for i := range a.bucketNames { err := loki_instrument.TimeRequest(ctx, "S3.List", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { @@ -428,7 +429,10 @@ func (a *S3ObjectClient) List(ctx context.Context, prefix, delimiter string) ([] } for _, commonPrefix := range output.CommonPrefixes { - commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(aws.StringValue(commonPrefix.Prefix))) + if !commonPrefixesSet[aws.StringValue(commonPrefix.Prefix)] { + commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(aws.StringValue(commonPrefix.Prefix))) + commonPrefixesSet[aws.StringValue(commonPrefix.Prefix)] = true + } } if output.IsTruncated == nil || !*output.IsTruncated { diff --git a/pkg/storage/chunk/client/aws/s3_storage_client_test.go b/pkg/storage/chunk/client/aws/s3_storage_client_test.go index 00ec9eba4072f..769f8cf00665c 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client_test.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client_test.go @@ -21,6 +21,11 @@ import ( "go.uber.org/atomic" "github.com/grafana/loki/pkg/storage/chunk/client/hedging" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" ) type RoundTripperFunc func(*http.Request) (*http.Response, error) @@ -195,3 +200,23 @@ session_token: session token require.Equal(t, underTest.SessionToken.String(), "session token") } + +type testCommonPrefixesS3Client struct { + s3iface.S3API +} + +func (m *testCommonPrefixesS3Client) ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error) { + var commonPrefixes []*s3.CommonPrefix + commonPrefix := "common-prefix-repeated/" + for i := 0; i < 2; i++ { + commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{Prefix: aws.String(commonPrefix)}) + } + return &s3.ListObjectsV2Output{CommonPrefixes: commonPrefixes, IsTruncated: aws.Bool(false)}, nil +} + +func TestCommonPrefixes(t *testing.T) { + s3 := S3ObjectClient{S3: &testCommonPrefixesS3Client{}, bucketNames: 
[]string{"bucket"}} + _, CommonPrefixes, err := s3.List(context.Background(), "", "/") + require.Equal(t, nil, err) + require.Equal(t, 1, len(CommonPrefixes)) +} diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go index f2eaa9f3733db..d4b5902516d20 100644 --- a/pkg/storage/config/schema_config.go +++ b/pkg/storage/config/schema_config.go @@ -491,6 +491,42 @@ func (cfg *IndexPeriodicTableConfig) Validate() error { return ValidatePathPrefix(cfg.PathPrefix) } +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (cfg *IndexPeriodicTableConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + g := struct { + PathPrefix string `yaml:"path_prefix"` + Prefix string `yaml:"prefix"` + Period model.Duration `yaml:"period"` + Tags Tags `yaml:"tags"` + }{} + if err := unmarshal(&g); err != nil { + return err + } + + cfg.PathPrefix = g.PathPrefix + cfg.Prefix = g.Prefix + cfg.Period = time.Duration(g.Period) + cfg.Tags = g.Tags + + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (cfg IndexPeriodicTableConfig) MarshalYAML() (interface{}, error) { + g := &struct { + PathPrefix string `yaml:"path_prefix"` + Prefix string `yaml:"prefix"` + Period model.Duration `yaml:"period"` + Tags Tags `yaml:"tags"` + }{ + PathPrefix: cfg.PathPrefix, + Prefix: cfg.Prefix, + Period: model.Duration(cfg.Period), + Tags: cfg.Tags, + } + + return g, nil +} func ValidatePathPrefix(prefix string) error { if prefix == "" { return errors.New("prefix must be set") diff --git a/pkg/storage/config/schema_config_test.go b/pkg/storage/config/schema_config_test.go index a547419987786..06fd191b7092a 100644 --- a/pkg/storage/config/schema_config_test.go +++ b/pkg/storage/config/schema_config_test.go @@ -503,6 +503,37 @@ func MustParseDayTime(s string) DayTime { return DayTime{model.TimeFromUnix(t.Unix())} } +func TestIndexPeriodicTableConfigCustomUnmarshalling(t *testing.T) { + yamlFile := `path_prefix: loki_index/ +prefix: cortex_ +period: 1w +tags: + foo: bar +` + + cfg := IndexPeriodicTableConfig{} + err := yaml.Unmarshal([]byte(yamlFile), &cfg) + require.NoError(t, err) + + expectedCfg := IndexPeriodicTableConfig{ + PathPrefix: "loki_index/", + PeriodicTableConfig: PeriodicTableConfig{ + Prefix: "cortex_", + Period: 7 * 24 * time.Hour, + Tags: map[string]string{ + "foo": "bar", + }, + }, + } + + require.Equal(t, expectedCfg, cfg) + + yamlGenerated, err := yaml.Marshal(&cfg) + require.NoError(t, err) + + require.Equal(t, yamlFile, string(yamlGenerated)) +} + func TestPeriodicTableConfigCustomUnmarshalling(t *testing.T) { yamlFile := `prefix: cortex_ period: 1w diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 9311de2090bd5..fe0f1245be412 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -25,7 +25,9 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/querier/astmapper" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/chunk/client/local" "github.com/grafana/loki/pkg/storage/config" @@ -494,6 +496,10 @@ func Test_store_SelectLogs(t *testing.T) { chunkMetrics: NilMetrics, } + tt.req.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(tt.req.Selector), + } + ctx = user.InjectOrgID(context.Background(), "test-user") it, err := s.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: tt.req}) 
if err != nil { @@ -818,6 +824,10 @@ func Test_store_SelectSample(t *testing.T) { chunkMetrics: NilMetrics, } + tt.req.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(tt.req.Selector), + } + ctx = user.InjectOrgID(context.Background(), "test-user") it, err := s.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: tt.req}) if err != nil { @@ -1385,6 +1395,9 @@ func Test_OverlappingChunks(t *testing.T) { Direction: logproto.BACKWARD, Start: time.Unix(0, 0), End: time.Unix(0, 10), + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"}`), + }, }}) if err != nil { t.Errorf("store.SelectLogs() error = %v", err) @@ -1497,6 +1510,15 @@ func Test_GetSeries(t *testing.T) { } { tt := tt t.Run(tt.name, func(t *testing.T) { + if tt.req.Selector != "" { + tt.req.Plan = &plan.QueryPlan{ + AST: syntax.MustParseExpr(tt.req.Selector), + } + } else { + tt.req.Plan = &plan.QueryPlan{ + AST: nil, + } + } series, err := store.SelectSeries(ctx, tt.req) require.NoError(t, err) require.Equal(t, tt.expectedSeries, series) diff --git a/pkg/storage/stores/shipper/bloomshipper/block_downloader.go b/pkg/storage/stores/shipper/bloomshipper/block_downloader.go index b6721db88640e..edf666c8e9569 100644 --- a/pkg/storage/stores/shipper/bloomshipper/block_downloader.go +++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" "github.com/go-kit/log" @@ -33,15 +34,23 @@ type blockDownloader struct { limits Limits activeUsersService *util.ActiveUsersCleanupService - ctx context.Context - manager *services.Manager - onWorkerStopCallback func() + ctx context.Context + manager *services.Manager + wg sync.WaitGroup +} + +type queueLimits struct { + limits Limits +} + +func (l *queueLimits) MaxConsumers(tenantID string, _ int) int { + return l.limits.BloomGatewayBlocksDownloadingParallelism(tenantID) } func newBlockDownloader(config config.Config, blockClient BlockClient, limits Limits, logger log.Logger, reg prometheus.Registerer) (*blockDownloader, error) { queueMetrics := queue.NewMetrics(reg, constants.Loki, "bloom_blocks_downloader") //add cleanup service - downloadingQueue := queue.NewRequestQueue(config.BlocksDownloadingQueue.MaxTasksEnqueuedPerTenant, time.Minute, queueMetrics) + downloadingQueue := queue.NewRequestQueue(config.BlocksDownloadingQueue.MaxTasksEnqueuedPerTenant, time.Minute, &queueLimits{limits: limits}, queueMetrics) activeUsersService := util.NewActiveUsersCleanupWithDefaultValues(queueMetrics.Cleanup) ctx := context.Background() @@ -55,19 +64,20 @@ func newBlockDownloader(config config.Config, blockClient BlockClient, limits Li } b := &blockDownloader{ - ctx: ctx, - logger: logger, - workingDirectory: config.WorkingDirectory, - queueMetrics: queueMetrics, - queue: downloadingQueue, - blockClient: blockClient, - activeUsersService: activeUsersService, - limits: limits, - manager: manager, - onWorkerStopCallback: onWorkerStopNoopCallback, + ctx: ctx, + logger: logger, + workingDirectory: config.WorkingDirectory, + queueMetrics: queueMetrics, + queue: downloadingQueue, + blockClient: blockClient, + activeUsersService: activeUsersService, + limits: limits, + manager: manager, + wg: sync.WaitGroup{}, } for i := 0; i < config.BlocksDownloadingQueue.WorkersCount; i++ { + b.wg.Add(1) go b.serveDownloadingTasks(fmt.Sprintf("worker-%d", i)) } return b, nil @@ -91,17 +101,15 @@ func NewBlockDownloadingTask(ctx context.Context, block BlockRef, resCh chan<- b } } -// noop implementation -var 
onWorkerStopNoopCallback = func() {} - func (d *blockDownloader) serveDownloadingTasks(workerID string) { + // defer first, so it gets executed as last of the deferred functions + defer d.wg.Done() + logger := log.With(d.logger, "worker", workerID) level.Debug(logger).Log("msg", "starting worker") d.queue.RegisterConsumerConnection(workerID) defer d.queue.UnregisterConsumerConnection(workerID) - //this callback is used only in the tests to assert that worker is stopped - defer d.onWorkerStopCallback() idx := queue.StartIndexWithLocalQueue @@ -155,11 +163,10 @@ func (d *blockDownloader) downloadBlocks(ctx context.Context, tenantID string, r errCh := make(chan error, len(references)) blocksCh := make(chan blockWithQuerier, len(references)) - downloadingParallelism := d.limits.BloomGatewayBlocksDownloadingParallelism(tenantID) for _, reference := range references { task := NewBlockDownloadingTask(ctx, reference, blocksCh, errCh) level.Debug(d.logger).Log("msg", "enqueuing task to download block", "block", reference.BlockPath) - err := d.queue.Enqueue(tenantID, nil, task, downloadingParallelism, nil) + err := d.queue.Enqueue(tenantID, nil, task, nil) if err != nil { errCh <- fmt.Errorf("error enquing downloading task for block %s : %w", reference.BlockPath, err) return blocksCh, errCh @@ -203,6 +210,7 @@ func (d *blockDownloader) createBlockQuerier(directory string) *v1.BlockQuerier func (d *blockDownloader) stop() { _ = services.StopManagerAndAwaitStopped(d.ctx, d.manager) + d.wg.Wait() } func writeDataToTempFile(workingDirectoryPath string, block *Block) (string, error) { diff --git a/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go index b69d036d30e37..9eb7bc0e66c0d 100644 --- a/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go @@ -15,7 +15,6 @@ import ( "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "go.uber.org/atomic" v1 "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config" @@ -37,10 +36,6 @@ func Test_blockDownloader_downloadBlocks(t *testing.T) { MaxTasksEnqueuedPerTenant: 20, }, }, blockClient, overrides, log.NewNopLogger(), prometheus.DefaultRegisterer) - stoppedWorkersCount := atomic.NewInt32(0) - downloader.onWorkerStopCallback = func() { - stoppedWorkersCount.Inc() - } require.NoError(t, err) blocksCh, errorsCh := downloader.downloadBlocks(context.Background(), "fake", blockReferences) downloadedBlocks := make(map[string]any, len(blockReferences)) @@ -63,10 +58,13 @@ func Test_blockDownloader_downloadBlocks(t *testing.T) { } require.Len(t, downloadedBlocks, 20, "all 20 block must be downloaded") + // We want all workers to be connected to the queue + require.Equal(t, workersCount, int(downloader.queue.GetConnectedConsumersMetric())) + downloader.stop() - require.Eventuallyf(t, func() bool { - return stoppedWorkersCount.Load() == int32(workersCount) - }, 1*time.Second, 10*time.Millisecond, "expected all %d workers to be stopped", workersCount) + + // We want all workers to be disconnected from the queue + require.Equal(t, 0, int(downloader.queue.GetConnectedConsumersMetric())) } // creates fake blocks and returns map[block-path]Block and mockBlockClient diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go index 
70c61ba0add8e..e24d7e35c412a 100644 --- a/pkg/storage/stores/shipper/bloomshipper/store.go +++ b/pkg/storage/stores/shipper/bloomshipper/store.go @@ -32,6 +32,7 @@ type Store interface { GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error) GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) + ForEach(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error Stop() } @@ -54,6 +55,11 @@ func (bs *BloomStore) GetBlockRefs(ctx context.Context, tenant string, from, thr return bs.shipper.GetBlockRefs(ctx, tenant, from, through) } +// ForEach implements Store +func (bs *BloomStore) ForEach(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error { + return bs.shipper.Fetch(ctx, tenant, blocks, callback) +} + // GetQueriersForBlocks implements Store func (bs *BloomStore) GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) { bqs := make([]BlockQuerierWithFingerprintRange, 0, 32) diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index 6dff5146af42b..b59b729b0d5bd 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/querier/astmapper" + "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/storage/chunk" "github.com/grafana/loki/pkg/storage/chunk/cache" chunkclient "github.com/grafana/loki/pkg/storage/chunk/client" @@ -135,6 +136,9 @@ func newQuery(query string, start, end time.Time, shards []astmapper.ShardAnnota End: end, Direction: logproto.FORWARD, Deletes: deletes, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(query), + }, } for _, shard := range shards { req.Shards = append(req.Shards, shard.String()) @@ -148,6 +152,9 @@ func newSampleQuery(query string, start, end time.Time, deletes []*logproto.Dele Start: start, End: end, Deletes: deletes, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(query), + }, } return req } diff --git a/pkg/validation/exporter.go b/pkg/validation/exporter.go index bbc26d1b544d5..ad9dde8574dd0 100644 --- a/pkg/validation/exporter.go +++ b/pkg/validation/exporter.go @@ -52,7 +52,7 @@ func (oe *OverridesExporter) Collect(ch chan<- prometheus.Metric) { return float64(val.Field(i).Int()), true case model.Duration: return float64(val.Field(i).Interface().(model.Duration)), true - case flagext.ByteSize: + case uint, flagext.ByteSize: return float64(val.Field(i).Uint()), true case float64: return val.Field(i).Float(), true diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index c4e38a898d2c6..cc55662aa27ef 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -98,7 +98,8 @@ type Limits struct { MaxEntriesLimitPerQuery int `yaml:"max_entries_limit_per_query" json:"max_entries_limit_per_query"` MaxCacheFreshness model.Duration `yaml:"max_cache_freshness_per_query" json:"max_cache_freshness_per_query"` MaxStatsCacheFreshness model.Duration `yaml:"max_stats_cache_freshness" json:"max_stats_cache_freshness"` - MaxQueriersPerTenant int `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` + MaxQueriersPerTenant uint 
`yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` + MaxQueryCapacity float64 `yaml:"max_query_capacity" json:"max_query_capacity"` QueryReadyIndexNumDays int `yaml:"query_ready_index_num_days" json:"query_ready_index_num_days"` QueryTimeout model.Duration `yaml:"query_timeout" json:"query_timeout"` @@ -190,6 +191,7 @@ type Limits struct { BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip"` BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate"` BloomGatewayBlocksDownloadingParallelism int `yaml:"bloom_gateway_blocks_downloading_parallelism" json:"bloom_gateway_blocks_downloading_parallelism"` + BloomGatewayCacheKeyInterval time.Duration `yaml:"bloom_gateway_cache_key_interval" json:"bloom_gateway_cache_key_interval"` AllowStructuredMetadata bool `yaml:"allow_structured_metadata,omitempty" json:"allow_structured_metadata,omitempty" doc:"description=Allow user to send structured metadata in push payload."` MaxStructuredMetadataSize flagext.ByteSize `yaml:"max_structured_metadata_size" json:"max_structured_metadata_size" doc:"description=Maximum size accepted for structured metadata per log line."` @@ -276,7 +278,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { _ = l.MaxStatsCacheFreshness.Set("10m") f.Var(&l.MaxStatsCacheFreshness, "frontend.max-stats-cache-freshness", "Do not cache requests with an end time that falls within Now minus this duration. 0 disables this feature (default).") - f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") + f.UintVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") + f.Float64Var(&l.MaxQueryCapacity, "frontend.max-query-capacity", 0, "How much of the available query capacity (\"querier\" components in distributed mode, \"read\" components in SSD mode) can be used by a single tenant. Allowed values are 0.0 to 1.0. For example, setting this to 0.5 would allow a tenant to use half of the available queriers for processing the query workload. If set to 0, query capacity is determined by frontend.max-queriers-per-tenant. When both frontend.max-queriers-per-tenant and frontend.max-query-capacity are configured, smaller value of the resulting querier replica count is considered: min(frontend.max-queriers-per-tenant, ceil(querier_replicas * frontend.max-query-capacity)). *All* queriers will handle requests for the tenant if neither limits are applied. This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL. 
Use this feature in a multi-tenant setup where you need to limit query capacity for certain tenants.") f.IntVar(&l.QueryReadyIndexNumDays, "store.query-ready-index-num-days", 0, "Number of days of index to be kept always downloaded for queries. Applies only to per user index in boltdb-shipper index store. 0 to disable.") f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.") @@ -311,6 +314,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 0, "Skip factor for the n-grams created when computing blooms from log lines.") f.Float64Var(&l.BloomFalsePositiveRate, "bloom-compactor.false-positive-rate", 0.01, "Scalable Bloom Filter desired false-positive rate.") f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, "bloom-gateway.blocks-downloading-parallelism", 50, "Maximum number of blocks will be downloaded in parallel by the Bloom Gateway.") + f.DurationVar(&l.BloomGatewayCacheKeyInterval, "bloom-gateway.cache-key-interval", 15*time.Minute, "Interval for computing the cache key in the Bloom Gateway.") l.ShardStreams = &shardstreams.Config{} l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f) @@ -368,6 +372,16 @@ func (l *Limits) Validate() error { level.Warn(util_log.Logger).Log("msg", "The compactor.allow-deletes configuration option has been deprecated and will be ignored. Instead, use deletion_mode in the limits_configs to adjust deletion functionality") } + if l.MaxQueryCapacity < 0 { + level.Warn(util_log.Logger).Log("msg", "setting frontend.max-query-capacity to 0 as it is configured to a value less than 0") + l.MaxQueryCapacity = 0 + } + + if l.MaxQueryCapacity > 1 { + level.Warn(util_log.Logger).Log("msg", "setting frontend.max-query-capacity to 1 as it is configured to a value greater than 1") + l.MaxQueryCapacity = 1 + } + return nil } @@ -502,10 +516,15 @@ func (o *Overrides) MaxQueryRange(_ context.Context, userID string) time.Duratio } // MaxQueriersPerUser returns the maximum number of queriers that can handle requests for this user. -func (o *Overrides) MaxQueriersPerUser(userID string) int { +func (o *Overrides) MaxQueriersPerUser(userID string) uint { return o.getOverridesForUser(userID).MaxQueriersPerTenant } +// MaxQueryCapacity returns how much of the available query capacity can be used by this user. +func (o *Overrides) MaxQueryCapacity(userID string) float64 { + return o.getOverridesForUser(userID).MaxQueryCapacity +} + // QueryReadyIndexNumDays returns the number of days for which we have to be query ready for a user.
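The interaction between `frontend.max-queriers-per-tenant` and the new `frontend.max-query-capacity` limit reduces to the min/ceil combination spelled out in the flag help above. A hypothetical illustration of that arithmetic (function and parameter names are ours, not Loki's API):

```go
package main

import (
	"fmt"
	"math"
)

// effectiveQueriers sketches the combination described in the
// frontend.max-query-capacity flag help: the smaller of
// max-queriers-per-tenant and ceil(replicas * capacity) wins, and a
// zero value for either limit means that limit does not apply.
func effectiveQueriers(maxQueriersPerTenant uint, queryCapacity float64, querierReplicas int) int {
	n := querierReplicas // default: all queriers serve the tenant
	if maxQueriersPerTenant > 0 && int(maxQueriersPerTenant) < n {
		n = int(maxQueriersPerTenant)
	}
	if queryCapacity > 0 {
		if byCapacity := int(math.Ceil(float64(querierReplicas) * queryCapacity)); byCapacity < n {
			n = byCapacity
		}
	}
	return n
}

func main() {
	// 10 replicas, max-queriers-per-tenant=6, max-query-capacity=0.5:
	// min(6, ceil(10*0.5)) = min(6, 5) = 5 queriers for this tenant.
	fmt.Println(effectiveQueriers(6, 0.5, 10)) // 5
}
```

Note that `Validate()` above clamps `MaxQueryCapacity` into [0, 1] with a warning rather than rejecting the config, so the capacity term is always a sane fraction by the time it is combined.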
func (o *Overrides) QueryReadyIndexNumDays(userID string) int { return o.getOverridesForUser(userID).QueryReadyIndexNumDays @@ -794,6 +813,10 @@ func (o *Overrides) BloomGatewayBlocksDownloadingParallelism(userID string) int return o.getOverridesForUser(userID).BloomGatewayBlocksDownloadingParallelism } +func (o *Overrides) BloomGatewayCacheKeyInterval(userID string) time.Duration { + return o.getOverridesForUser(userID).BloomGatewayCacheKeyInterval +} + func (o *Overrides) BloomGatewayEnabled(userID string) bool { return o.getOverridesForUser(userID).BloomGatewayEnabled } diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 2f52addf52022..26a36fbbfbc28 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,18 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 5.40.1 + +- [BUGFIX] Remove ruler enabled condition in networkpolicies. + +## 5.40.0 + +- [CHANGE] Add extraContainers parameter for the write pod + +## 5.39.0 + +- [FEATURE] Add support for adding OpenStack swift container credentials via helm chart + ## 5.38.0 - [CHANGE] Changed MinIO Helm Chart version to 4.0.15 diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 2e2d94b49df38..c48b788c8445f 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.2 -version: 5.38.0 +version: 5.40.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 76ab849b64463..d35dd7d248ff1 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.38.0](https://img.shields.io/badge/Version-5.38.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) +![Version: 5.40.1](https://img.shields.io/badge/Version-5.40.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index 964a5a6dcd97b..eb3bf470a6313 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -277,6 +277,39 @@ azure: endpoint_suffix: {{ . }} {{- end }} {{- end -}} +{{- else if eq .Values.loki.storage.type "swift" -}} +{{- with .Values.loki.storage.swift }} +swift: + {{- with .auth_version }} + auth_version: {{ . }} + {{- end }} + auth_url: {{ .auth_url }} + {{- with .internal }} + internal: {{ . }} + {{- end }} + username: {{ .username }} + user_domain_name: {{ .user_domain_name }} + {{- with .user_domain_id }} + user_domain_id: {{ . }} + {{- end }} + {{- with .user_id }} + user_id: {{ . 
diff --git a/production/helm/loki/templates/networkpolicy.yaml b/production/helm/loki/templates/networkpolicy.yaml
index c6d5fa0264a41..4424d90db08d4 100644
--- a/production/helm/loki/templates/networkpolicy.yaml
+++ b/production/helm/loki/templates/networkpolicy.yaml
@@ -112,7 +112,6 @@ spec:
   {{- end }}
 {{- end }}

-{{- if .Values.ruler.enabled }}
---
 apiVersion: networking.k8s.io/v1
 kind: NetworkPolicy
@@ -126,7 +125,7 @@ spec:
     - Egress
   podSelector:
     matchLabels:
-      {{- include "loki.rulerSelectorLabels" . | nindent 6 }}
+      {{- include "loki.backendSelectorLabels" . | nindent 6 }}
   egress:
     - ports:
         - port: {{ .Values.networkPolicy.alertmanager.port }}
@@ -140,7 +139,6 @@ spec:
           {{- toYaml .Values.networkPolicy.alertmanager.podSelector | nindent 12 }}
       {{- end }}
 {{- end }}
-{{- end }}

 {{- if .Values.networkPolicy.externalStorage.ports }}
---
diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml
index fdbc2f04d20b3..8c5e426d3ffd6 100644
--- a/production/helm/loki/templates/write/statefulset-write.yaml
+++ b/production/helm/loki/templates/write/statefulset-write.yaml
@@ -138,6 +138,9 @@ spec:
         {{- end }}
           resources:
             {{- toYaml .Values.write.resources | nindent 12 }}
+      {{- with .Values.write.extraContainers }}
+        {{- toYaml . | nindent 8}}
+      {{- end }}
       {{- with .Values.write.affinity }}
       affinity:
         {{- tpl . $ | nindent 8 }}
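Because `write.extraContainers` is injected verbatim through `toYaml . | nindent 8`, entries take the ordinary Kubernetes container shape. A sketch of generating such a values entry from a `corev1.Container` (the sidecar name and image below are made up for illustration):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// A made-up sidecar showing the shape of what `write.extraContainers`
	// accepts: plain container entries, rendered as-is into the write
	// StatefulSet by the `toYaml . | nindent 8` block above.
	sidecar := corev1.Container{
		Name:  "logrotate",                    // hypothetical name
		Image: "example.com/logrotate:latest", // hypothetical image
		Args:  []string{"--interval=1h"},
	}
	out, err := yaml.Marshal([]corev1.Container{sidecar})
	if err != nil {
		panic(err)
	}
	// Prints a YAML list suitable for pasting under write.extraContainers.
	fmt.Print(string(out))
}
```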
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 472882a226c8b..738cf6ea25ae7 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -298,6 +298,26 @@ loki:
       userAssignedId: null
       requestTimeout: null
       endpointSuffix: null
+    swift:
+      auth_version: null
+      auth_url: null
+      internal: null
+      username: null
+      user_domain_name: null
+      user_domain_id: null
+      user_id: null
+      password: null
+      domain_id: null
+      domain_name: null
+      project_id: null
+      project_name: null
+      project_domain_id: null
+      project_domain_name: null
+      region_name: null
+      container_name: null
+      max_retries: null
+      connect_timeout: null
+      request_timeout: null
     filesystem:
       chunks_directory: /var/loki/chunks
       rules_directory: /var/loki/rules
@@ -780,6 +800,8 @@ write:
   # https://github.com/grafana/loki/blob/main/docs/sources/operations/storage/wal.md#how-to-scale-updown
   # -- Init containers to add to the write pods
   initContainers: []
+  # -- Containers to add to the write pods
+  extraContainers: []
   # -- Volume mounts to add to the write pods
   extraVolumeMounts: []
   # -- Volumes to add to the write pods
diff --git a/production/ksonnet/loki/memberlist.libsonnet b/production/ksonnet/loki/memberlist.libsonnet
index 5bd95183c6bef..636fd90e1f0cb 100644
--- a/production/ksonnet/loki/memberlist.libsonnet
+++ b/production/ksonnet/loki/memberlist.libsonnet
@@ -159,4 +159,5 @@
   // Disable the consul deployment if not migrating and using memberlist
   consul_deployment: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled && !$._config.multikv_migration_teardown then {} else super.consul_deployment,
   consul_service: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled && !$._config.multikv_migration_teardown then {} else super.consul_service,
+  consul_config_map: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled && !$._config.multikv_migration_teardown then {} else super.consul_config_map,
 }
diff --git a/tools/dev/loki-boltdb-storage-s3/compose-up.sh b/tools/dev/loki-boltdb-storage-s3/compose-up.sh
index 1841f312ca33f..2d26a83123c9e 100755
--- a/tools/dev/loki-boltdb-storage-s3/compose-up.sh
+++ b/tools/dev/loki-boltdb-storage-s3/compose-up.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 set -e

diff --git a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml b/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
index de0dbd713d92c..83149885fe85b 100644
--- a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
+++ b/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
@@ -108,6 +108,7 @@ schema_config:
       object_store: s3
       schema: v11
       store: boltdb-shipper
+      row_shards: 4
 server:
   graceful_shutdown_timeout: 5s
   grpc_server_max_concurrent_streams: 1000
diff --git a/tools/lambda-promtail/lambda-promtail/cw.go b/tools/lambda-promtail/lambda-promtail/cw.go
index 895cd66c8f450..1ad6bf34878ed 100644
--- a/tools/lambda-promtail/lambda-promtail/cw.go
+++ b/tools/lambda-promtail/lambda-promtail/cw.go
@@ -18,6 +18,7 @@ func parseCWEvent(ctx context.Context, b *batch, ev *events.CloudwatchLogsEvent)
 	}

 	labels := model.LabelSet{
+		model.LabelName("__aws_log_type"):             model.LabelValue("cloudwatch"),
 		model.LabelName("__aws_cloudwatch_log_group"): model.LabelValue(data.LogGroup),
 		model.LabelName("__aws_cloudwatch_owner"):     model.LabelValue(data.Owner),
 	}
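With the new `__aws_log_type` label, the stream key for CloudWatch entries is the sorted string form of the label set; `model.LabelSet.String()` orders labels by name, which is why `__aws_log_type` appears last in the `expectedStream` values of the test added below. A small sketch using the same `prometheus/common/model` types (the group and owner values are sample data):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Mirrors the label set built in parseCWEvent after this change;
	// the group/owner values are sample data, not Loki defaults.
	labels := model.LabelSet{
		model.LabelName("__aws_log_type"):             model.LabelValue("cloudwatch"),
		model.LabelName("__aws_cloudwatch_log_group"): model.LabelValue("testLogGroup"),
		model.LabelName("__aws_cloudwatch_owner"):     model.LabelValue("123456789123"),
	}
	// Prints the sorted form used as the batch's stream key:
	// {__aws_cloudwatch_log_group="testLogGroup", __aws_cloudwatch_owner="123456789123", __aws_log_type="cloudwatch"}
	fmt.Println(labels.String())
}
```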
diff --git a/tools/lambda-promtail/lambda-promtail/cw_test.go b/tools/lambda-promtail/lambda-promtail/cw_test.go
new file mode 100644
index 0000000000000..9ad5a907c7711
--- /dev/null
+++ b/tools/lambda-promtail/lambda-promtail/cw_test.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"context"
+	"testing"
+
+	"github.com/aws/aws-lambda-go/events"
+	"github.com/stretchr/testify/require"
+
+	"github.com/grafana/loki/pkg/logproto"
+)
+
+func Test_parseCWEvent(t *testing.T) {
+	tests := []struct {
+		name           string
+		b              *batch
+		expectedStream string
+		keepStream     bool
+	}{
+		{
+			name: "cloudwatch",
+			b: &batch{
+				streams: map[string]*logproto.Stream{},
+			},
+			expectedStream: `{__aws_cloudwatch_log_group="testLogGroup", __aws_cloudwatch_owner="123456789123", __aws_log_type="cloudwatch"}`,
+			keepStream:     false,
+		},
+		{
+			name: "cloudwatch_keepStream",
+			b: &batch{
+				streams: map[string]*logproto.Stream{},
+			},
+			expectedStream: `{__aws_cloudwatch_log_group="testLogGroup", __aws_cloudwatch_log_stream="testLogStream", __aws_cloudwatch_owner="123456789123", __aws_log_type="cloudwatch"}`,
+			keepStream:     true,
+		},
+	}
+
+	for _, tt := range tests {
+		// Docs: https://docs.aws.amazon.com/lambda/latest/dg/services-cloudwatchlogs.html
+		// Example CloudWatchLogEvent copied from https://github.com/aws/aws-lambda-go/blob/main/events/cloudwatch_logs_test.go
+		cwevent := &events.CloudwatchLogsEvent{
+			AWSLogs: events.CloudwatchLogsRawData{
+				Data: "H4sIAAAAAAAAAHWPwQqCQBCGX0Xm7EFtK+smZBEUgXoLCdMhFtKV3akI8d0bLYmibvPPN3wz00CJxmQnTO41whwWQRIctmEcB6sQbFC3CjW3XW8kxpOpP+OC22d1Wml1qZkQGtoMsScxaczKN3plG8zlaHIta5KqWsozoTYw3/djzwhpLwivWFGHGpAFe7DL68JlBUk+l7KSN7tCOEJ4M3/qOI49vMHj+zCKdlFqLaU2ZHV2a4Ct/an0/ivdX8oYc1UVX860fQDQiMdxRQEAAA==",
+			},
+		}
+
+		t.Run(tt.name, func(t *testing.T) {
+			batchSize = 131072 // Set large enough we don't send to promtail
+			keepStream = tt.keepStream
+			err := parseCWEvent(context.Background(), tt.b, cwevent)
+			if err != nil {
+				t.Error(err)
+			}
+			require.Len(t, tt.b.streams, 1)
+			stream, ok := tt.b.streams[tt.expectedStream]
+			require.True(t, ok, "batch does not contain stream: %s", tt.expectedStream)
+			require.NotNil(t, stream)
+		})
+	}
+}
diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go
index 7a88edb72d8c4..1bdd4042ade0e 100644
--- a/tools/tsdb/bloom-tester/lib.go
+++ b/tools/tsdb/bloom-tester/lib.go
@@ -357,8 +357,10 @@ func analyze(metrics *Metrics, sampler Sampler, indexShipper indexshipper.IndexS
 		Bloom:  &bloom,
 		Series: &series,
 	}
-	bloomTokenizer.PopulateSeriesWithBloom(&swb, got)
-
+	err := bloomTokenizer.PopulateSeriesWithBloom(&swb, got)
+	if err != nil {
+		level.Error(util_log.Logger).Log("msg", "failed populating SeriesWithBloom", "err", err)
+	}
 	endTime := time.Now().UnixMilli()
 	if len(got) > 0 {
 		metrics.bloomSize.WithLabelValues(experiment.name).Observe(float64(sbf.Capacity() / 8))
diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go
index 6be3b767ec634..77d9e3967ca86 100644
--- a/tools/tsdb/bloom-tester/readlib.go
+++ b/tools/tsdb/bloom-tester/readlib.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"github.com/grafana/dskit/services"

 	"github.com/grafana/loki/pkg/chunkenc"