diff --git a/CHANGELOG.md b/CHANGELOG.md index 294f7ee010378..b59bfa014bd5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ * [12398](https://github.com/grafana/loki/pull/12398) **kolesnikovae** LogQL: Introduces pattern match filter operators. ##### Fixes + * [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var. * [11195](https://github.com/grafana/loki/pull/11195) **canuteson** Generate tsdb_shipper storage_config even if using_boltdb_shipper is false * [9831](https://github.com/grafana/loki/pull/9831) **sijmenhuizenga**: Fix Promtail excludepath not evaluated on newly added files. @@ -103,7 +104,6 @@ ## [2.9.6](https://github.com/grafana/loki/compare/v2.9.5...v2.9.6) (2024-03-21) - ### Bug Fixes * promtail failures connecting to local loki installation [release-2.9.x] ([#12184](https://github.com/grafana/loki/issues/12184)) ([8585e35](https://github.com/grafana/loki/commit/8585e3537375c0deb11462d7256f5da23228f5e1)) @@ -149,21 +149,18 @@ ## [2.8.11](https://github.com/grafana/loki/compare/v2.8.10...v2.8.11) (2024-03-22) - ### Bug Fixes * update google.golang.org/protobuf to v1.33.0 ([#12276](https://github.com/grafana/loki/issues/12276)) ([3c05724](https://github.com/grafana/loki/commit/3c05724ac9d7ea9b6048c6e67cd13dc55fa72782)) ## [2.8.10](https://github.com/grafana/loki/compare/v2.8.9...v2.8.10) (2024-02-28) - ### Bug Fixes * image tag from env and pin release to v1.11.5 ([#12073](https://github.com/grafana/loki/issues/12073)) ([8e11cd7](https://github.com/grafana/loki/commit/8e11cd7a8222a64d60bff30a41e399ddbda3372e)) ## [2.8.9](https://github.com/grafana/loki/compare/v2.8.8...v2.8.9) (2024-02-23) - ### Bug Fixes * bump alpine base image and go to fix CVEs ([#12026](https://github.com/grafana/loki/issues/12026)) ([196650e](https://github.com/grafana/loki/commit/196650e4c119249016df85a50a2cced521cbe9be)) @@ -192,7 +189,6 @@ * [10585](https://github.com/grafana/loki/pull/10585) **ashwanthgoli** / **chaudum**: Fix bug in index object client that could result in not showing all ingested logs in query results. * [10314](https://github.com/grafana/loki/pull/10314) **bboreham**: Fix race conditions in indexshipper. - ## 2.9.0 (2023-09-06) ### All Changes @@ -318,6 +314,7 @@ #### FluentD ##### Enhancements + * [LOG-4012](https://issues.redhat.com/browse/LOG-4012) **jcantril**: fluent-plugin-grapha-loki: Add config to support tls: ciphers, min_versio #### Jsonnet @@ -628,6 +625,7 @@ Check the history of the branch `release-2.7.x`. #### Loki ##### Enhancements + * [7436](https://github.com/grafana/loki/pull/7436) **periklis**: Expose ring and memberlist handlers through internal server listener * [7227](https://github.com/grafana/loki/pull/7227) **Red-GV**: Add ability to configure tls minimum version and cipher suites * [7179](https://github.com/grafana/loki/pull/7179) **vlad-diachenko**: Add ability to use Azure Service Principals credentials to authenticate to Azure Blob Storage. @@ -648,6 +646,7 @@ Check the history of the branch `release-2.7.x`. * [6952](https://github.com/grafana/loki/pull/6952) **DylanGuedes**: Experimental: Introduce a new feature named stream sharding. 
##### Fixes + * [7426](https://github.com/grafana/loki/pull/7426) **periklis**: Add missing compactor delete client tls client config * [7238](https://github.com/grafana/loki/pull/7328) **periklis**: Fix internal server bootstrap for query frontend * [7288](https://github.com/grafana/loki/pull/7288) **ssncferreira**: Fix query mapping in AST mapper `rangemapper` to support the new `VectorExpr` expression. @@ -660,6 +659,7 @@ Check the history of the branch `release-2.7.x`. * [6372](https://github.com/grafana/loki/pull/6372) **splitice**: Add support for numbers in JSON fields. ##### Changes + * [6726](https://github.com/grafana/loki/pull/6726) **kavirajk**: upgrades go from 1.17.9 -> 1.18.4 * [6415](https://github.com/grafana/loki/pull/6415) **salvacorts**: Evenly spread queriers across kubernetes nodes. * [6349](https://github.com/grafana/loki/pull/6349) **simonswine**: Update the default HTTP listen port from 80 to 3100. Make sure to configure the port explicitly if you are using port 80. @@ -672,9 +672,11 @@ Check the history of the branch `release-2.7.x`. * [5400](https://github.com/grafana/loki/pull/5400) **BenoitKnecht**: promtail/server: Disable profiling by default #### Promtail + * [7470](https://github.com/grafana/loki/pull/7470) **Jack-King**: Add configuration for adding custom HTTP headers to push requests ##### Enhancements + * [7593](https://github.com/grafana/loki/pull/7593) **chodges15**: Promtail: Add tenant label to client drop metrics and logs * [7101](https://github.com/grafana/loki/pull/7101) **liguozhong**: Promtail: Add support for max stream limit. * [7247](https://github.com/grafana/loki/pull/7247) **liguozhong**: Add config reload endpoint / signal to promtail. @@ -686,27 +688,30 @@ Check the history of the branch `release-2.7.x`. * [7414](https://github.com/grafana/loki/pull/7414) **thepalbi**: Add basic tracing support ##### Fixes -* [7394](https://github.com/grafana/loki/pull/7394) **liguozhong**: Fix issue with the Cloudflare target that caused it to stop working after it received an error in the logpull request as explained in issue https://github.com/grafana/loki/issues/6150 -* [6766](https://github.com/grafana/loki/pull/6766) **kavirajk**: fix(logql): Make `LabelSampleExtractor` ignore processing the line if it doesn't contain that specific label. Fixes unwrap behavior explained in the issue https://github.com/grafana/loki/issues/6713 + +* [7394](https://github.com/grafana/loki/pull/7394) **liguozhong**: Fix issue with the Cloudflare target that caused it to stop working after it received an error in the logpull request as explained in issue <https://github.com/grafana/loki/issues/6150> +* [6766](https://github.com/grafana/loki/pull/6766) **kavirajk**: fix(logql): Make `LabelSampleExtractor` ignore processing the line if it doesn't contain that specific label.
Fixes unwrap behavior explained in the issue <https://github.com/grafana/loki/issues/6713> * [7016](https://github.com/grafana/loki/pull/7016) **chodges15**: Fix issue with dropping logs when a file based SD target's labels are updated ##### Changes -* **quodlibetor**: Change Docker target discovery log level from `Error` to `Info` +* **quodlibetor**: Change Docker target discovery log level from `Error` to `Info` #### Logcli + * [7325](https://github.com/grafana/loki/pull/7325) **dbirks**: Document setting up command completion * [8518](https://github.com/grafana/loki/pull/8518) **SN9NV**: Add parallel flags #### Fluent Bit #### Loki Canary + * [7398](https://github.com/grafana/loki/pull/7398) **verejoel**: Allow insecure TLS connections #### Jsonnet -* [6189](https://github.com/grafana/loki/pull/6189) **irizzant**: Add creation of a `ServiceMonitor` object for Prometheus scraping through configuration parameter `create_service_monitor`. Simplify mixin usage by adding (https://github.com/prometheus-operator/kube-prometheus) library. -* [6662](https://github.com/grafana/loki/pull/6662) **Whyeasy**: Fixes memberlist error when using a stateful ruler. +* [6189](https://github.com/grafana/loki/pull/6189) **irizzant**: Add creation of a `ServiceMonitor` object for Prometheus scraping through configuration parameter `create_service_monitor`. Simplify mixin usage by adding the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) library. +* [6662](https://github.com/grafana/loki/pull/6662) **Whyeasy**: Fixes memberlist error when using a stateful ruler. ### Notes @@ -729,11 +734,13 @@ Check the history of the branch `release-2.7.x`. # 2.6.0 (2022/07/08) ### All Changes + Here is the list with the changes that were produced since the previous release. #### Loki ##### Enhancements + * [5662](https://github.com/grafana/loki/pull/5662) **ssncferreira** **chaudum** Improve performance of instant queries by splitting range into multiple subqueries that are executed in parallel. * [5848](https://github.com/grafana/loki/pull/5848) **arcosx**: Add Baidu AI Cloud as a storage backend choice. * [6410](https://github.com/grafana/loki/pull/6410) **MichelHollands**: Add support for per tenant delete API access enabling. @@ -744,12 +751,14 @@ Here is the list with the changes that were produced since the previous release. * [6163](https://github.com/grafana/loki/pull/6163) **jburnham**: LogQL: Add a `default` sprig template function in LogQL label/line formatter. ##### Fixes + * [6152](https://github.com/grafana/loki/pull/6152) **slim-bean**: Fixes unbounded ingester memory growth when live tailing under specific circumstances. * [5685](https://github.com/grafana/loki/pull/5685) **chaudum**: Fix bug in push request parser that allowed users to send arbitrary non-string data as "log line". * [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist. !hide or not hide (bugfix Loki) * [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common configuration block net interface name when overwritten by ring common configuration. ##### Changes + * [6361](https://github.com/grafana/loki/pull/6361) **chaudum**: Sum values in unwrapped rate aggregation instead of treating them as counter. * [6412](https://github.com/grafana/loki/pull/6412) **chaudum**: Add new unwrapped range aggregation `rate_counter()` to LogQL * [6042](https://github.com/grafana/loki/pull/6042) **slim-bean**: Add a new configuration to allow fudging of ingested timestamps to guarantee sort order of duplicate timestamps at query time.
@@ -763,6 +772,7 @@ Here is the list with the changes that were produced since the previous release. #### Promtail ##### Enhancements + * [6105](https://github.com/grafana/loki/pull/6105) **rutgerke** Export metrics for the Promtail journal target. * [5943](https://github.com/grafana/loki/pull/5943) **tpaschalis**: Add configuration support for excluding configuration files when instantiating Promtail. * [5790](https://github.com/grafana/loki/pull/5790) **chaudum**: Add UDP support for Promtail's syslog target. @@ -772,17 +782,24 @@ Here is the list with the changes that were produced since the previous release. * [6395](https://github.com/grafana/loki/pull/6395) **DylanGuedes**: Add encoding support ##### Fixes + * [6034](https://github.com/grafana/loki/pull/6034) **DylanGuedes**: Promtail: Fix symlink tailing behavior. + ##### Changes + * [6371](https://github.com/grafana/loki/pull/6371) **witalisoft**: BREAKING: Support more complex match based on multiple extracted data fields in drop stage * [5686](https://github.com/grafana/loki/pull/5686) **ssncferreira**: Move promtail StreamLagLabels config to upper level config.Config * [5839](https://github.com/grafana/loki/pull/5839) **marctc**: Add ActiveTargets method to promtail * [5661](https://github.com/grafana/loki/pull/5661) **masslessparticle**: Invalidate caches on deletes + #### Fluent Bit + * [5711](https://github.com/grafana/loki/pull/5711) **MichelHollands**: Update fluent-bit output name #### Loki Canary + * [6310](https://github.com/grafana/loki/pull/6310) **chodges15**: Add support for client-side TLS certs in loki-canary for Loki connection + ### Notes This release was created from a branch starting at commit `1794a766134f07b54386b1a431b58e1d44e6d7f7` but it may also contain backported changes from main. @@ -806,6 +823,7 @@ to include only the most relevant. #### Loki ##### Enhancements + * [5542](https://github.com/grafana/loki/pull/5542) **bboreham**: regexp filter: use modified package with optimisations * [5318](https://github.com/grafana/loki/pull/5318) **jeschkies**: Speed up `EntrySortIterator` by 20%. * [5317](https://github.com/grafana/loki/pull/5317) **owen-d**: Logql/parallel binop @@ -822,9 +840,11 @@ to include only the most relevant. * [5013](https://github.com/grafana/loki/pull/5013) **liguozhong**: [new feature] logql: extrapolate unwrapped rate function * [4947](https://github.com/grafana/loki/pull/4947) **siavashs**: Support Redis Cluster Configuration Endpoint * [4938](https://github.com/grafana/loki/pull/4938) **DylanGuedes**: Add distributor ring page -* [4879](https://github.com/grafana/loki/pull/4879) **cyriltovena**: LogQL: add __line__ function to | line_format template +* [4879](https://github.com/grafana/loki/pull/4879) **cyriltovena**: LogQL: add `__line__` function to | line_format template * [4858](https://github.com/grafana/loki/pull/4858) **sandy2008**: feat(): add ManagedIdentity in Azure Blob Storage + ## Main + * [5789](https://github.com/grafana/loki/pull/5789) **bboreham**: Production config: add dot to some DNS address to reduce lookups. * [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4. * [5715](https://github.com/grafana/loki/pull/5715) **chaudum** Add option to push RFC5424 syslog messages from Promtail in syslog scrape target. @@ -863,7 +883,7 @@ to include only the most relevant. * [5144](https://github.com/grafana/loki/pull/5144) **dannykopping** Ruler: fix remote write basic auth credentials.
* [5091](https://github.com/grafana/loki/pull/5091) **owen-d**: Changes `ingester.concurrent-flushes` default to 32 * [5031](https://github.com/grafana/loki/pull/5031) **liguozhong**: Promtail: Add global read rate limiting. -* [4879](https://github.com/grafana/loki/pull/4879) **cyriltovena**: LogQL: add __line__ function to | line_format template. +* [4879](https://github.com/grafana/loki/pull/4879) **cyriltovena**: LogQL: add `__line__` function to | line_format template. * [5081](https://github.com/grafana/loki/pull/5081) **SasSwart**: Add the option to configure memory ballast for Loki * [5085](https://github.com/grafana/loki/pull/5085) **aknuds1**: Upgrade Cortex to [e0807c4eb487](https://github.com/cortexproject/cortex/compare/4e9fc3a2b5ab..e0807c4eb487) and Prometheus to [692a54649ed7](https://github.com/prometheus/prometheus/compare/2a3d62ac8456..692a54649ed7) * [5067](https://github.com/grafana/loki/pull/5057) **cstyan**: Add a metric to Azure Blob Storage client to track total egress bytes @@ -886,7 +906,6 @@ to include only the most relevant. * [4731](https://github.com/grafana/loki/pull/4731) **cyriltovena**: Improve heap iterators. * [4394](https://github.com/grafana/loki/pull/4394) **cyriltovena**: Improve case insensitive search to avoid allocations. - ##### Fixes * [5768](https://github.com/grafana/loki/pull/5768) **slim-bean**: Loki: Increase flush_op_timeout default from 10s to 10m @@ -927,6 +946,7 @@ to include only the most relevant. * [4741](https://github.com/grafana/loki/pull/4741) **sandeepsukhani**: index cleanup fixes while applying retention ##### Changes + * [5544](https://github.com/grafana/loki/pull/5544) **ssncferreira**: Update vectorAggEvaluator to fail for expressions without grouping * [5543](https://github.com/grafana/loki/pull/5543) **cyriltovena**: update loki go version to 1.17.8 * [5450](https://github.com/grafana/loki/pull/5450) **BenoitKnecht**: pkg/ruler/base: Add external_labels option @@ -958,10 +978,10 @@ to include only the most relevant. * [4736](https://github.com/grafana/loki/pull/4736) **sandeepsukhani**: allow applying retention at different interval than compaction with a config * [4656](https://github.com/grafana/loki/pull/4656) **ssncferreira**: Fix dskit/ring metric with 'cortex_' prefix - #### Promtail ##### Enhancements + * [5359](https://github.com/grafana/loki/pull/5359) **JBSchami**: Lambda-promtail: Enhance lambda-promtail to support adding extra labels from an environment variable value * [5290](https://github.com/grafana/loki/pull/5290) **ssncferreira**: Update promtail to support duration string formats * [5051](https://github.com/grafana/loki/pull/5051) **liguozhong**: [new] promtail pipeline: Promtail Rate Limit stage #5048 @@ -972,6 +992,7 @@ to include only the most relevant. * [4663](https://github.com/grafana/loki/pull/4663) **taisho6339**: Add SASL&mTLS authentication support for Kafka in Promtail ##### Fixes + * [5497](https://github.com/grafana/loki/pull/5497) **MasslessParticle**: Fix orphaned metrics in the file tailer * [5409](https://github.com/grafana/loki/pull/5409) **ldb**: promtail/targets/syslog: Enable best effort parsing for Syslog messages * [5246](https://github.com/grafana/loki/pull/5246) **rsteneteg**: Promtail: skip glob search if filetarget path is an existing file and not a directory @@ -982,24 +1003,29 @@ to include only the most relevant. * [5698](https://github.com/grafana/loki/pull/5698) **paullryan**: Promtail: Fix retry/stop when erroring for out of cloudflare retention range (e.g.
over 168 hours old) ##### Changes + * [5377](https://github.com/grafana/loki/pull/5377) **slim-bean**: Promtail: Remove promtail_log_entries_bytes_bucket histogram * [5266](https://github.com/grafana/loki/pull/5266) **jeschkies**: Write Promtail position file atomically. * [4794](https://github.com/grafana/loki/pull/4794) **taisho6339**: Aggregate inotify watcher to file target manager * [4745](https://github.com/grafana/loki/pull/4745) **taisho6339**: Expose Kafka message key in labels #### Logcli + * [5477](https://github.com/grafana/loki/pull/5477) **atomic77**: logcli: Remove port from TLS server name when provided in --addr * [4667](https://github.com/grafana/loki/pull/4667) **jeschkies**: Package logcli as rpm and deb. * [4606](https://github.com/grafana/loki/pull/4606) **kavirajk**: Execute Loki queries on raw log data piped to stdin #### Lambda-Promtail + * [5065](https://github.com/grafana/loki/pull/5065) **AndreZiviani**: lambda-promtail: Add ability to ingest logs from S3 * [7632](https://github.com/grafana/loki/pull/7632) **changhyuni**: lambda-promtail: Add kinesis data stream to use in terraform #### Fluent Bit + * [5223](https://github.com/grafana/loki/pull/5223) **cyriltovena**: fluent-bit: Attempt to unmarshal nested json. #### FluentD + * [6240](https://github.com/grafana/loki/pull/6240) **taharah**: Add the feature flag `include_thread_label` to allow the `fluentd_thread` label included when using multiple threads for flushing to be configurable * [5107](https://github.com/grafana/loki/pull/5107) **chaudum**: fluentd: Fix bug that caused lines to be dropped when containing non utf-8 characters * [5163](https://github.com/grafana/loki/pull/5163) **chaudum**: Fix encoding error in fluentd client @@ -1168,6 +1194,7 @@ Here is a list of all changes included in 2.4.0. * [4071](https://github.com/grafana/loki/pull/4071) **jeschkies**: Support frontend V2 with query scheduler. #### Promtail + * [4599](https://github.com/grafana/loki/pull/4599) **rsteneteg**: [Promtail] resolve issue with promtail not scraping target if only path changed in a simpler way that dont need mutex to sync threads * [4588](https://github.com/grafana/loki/pull/4588) **owen-d**: regenerates assets from current vfsgen dependency * [4568](https://github.com/grafana/loki/pull/4568) **cyriltovena**: Promtail Kafka target @@ -1183,11 +1210,13 @@ Here is a list of all changes included in 2.4.0. * [3907](https://github.com/grafana/loki/pull/3907) **johanfleury**: promtail: add support for TLS/mTLS in syslog receiver #### Logcli + * [4303](https://github.com/grafana/loki/pull/4303) **cyriltovena**: Allow to run local boltdb queries with logcli. * [4242](https://github.com/grafana/loki/pull/4242) **chaudum**: cli: Register configuration option `store.max-look-back-period` as CLI argument * [4203](https://github.com/grafana/loki/pull/4203) **invidian**: cmd/logcli: add --follow flag as an alias for --tail #### Build + * [4639](https://github.com/grafana/loki/pull/4639) **slim-bean**: Build: simplify how protos are built * [4609](https://github.com/grafana/loki/pull/4609) **slim-bean**: Build: Update CODEOWNERS to put Karen back in charge of the docs * [4541](https://github.com/grafana/loki/pull/4541) **cstyan**: Fix drone ECR publish. @@ -1204,12 +1233,14 @@ Here is a list of all changes included in 2.4.0. 
* [4189](https://github.com/grafana/loki/pull/4189) **mathew-fleisch**: Makefile: Add darwin/arm64 build to release binaries #### Project + * [4535](https://github.com/grafana/loki/pull/4535) **carlpett**: Fix branch reference in PR template * [4604](https://github.com/grafana/loki/pull/4604) **kavirajk**: Update PR template to include `changelog` update in the checklist * [4494](https://github.com/grafana/loki/pull/4494) **cstyan**: Add a a parameter to keep/drop the stream label from cloudwatch. * [4315](https://github.com/grafana/loki/pull/4315) **cstyan**: Rewrite lambda-promtail to use subscription filters. #### Dashboards + * [4634](https://github.com/grafana/loki/pull/4634) **cyriltovena**: Fixes the operational dashboard using an old metric. * [4618](https://github.com/grafana/loki/pull/4618) **cstyan**: loki-mixin: fix label selectors + logs dashboard * [4575](https://github.com/grafana/loki/pull/4575) **dannykopping**: Adding recording rules dashboard @@ -1218,15 +1249,17 @@ Here is a list of all changes included in 2.4.0. * [4423](https://github.com/grafana/loki/pull/4423) **cstyan**: Add tag/link fix to operational dashboard and promtail mixin dashboard. * [4401](https://github.com/grafana/loki/pull/4401) **cstyan**: Minor dashboard fixes - #### Docker-driver + * [4396](https://github.com/grafana/loki/pull/4396) **owen-d**: Removes docker driver empty log line message * [4190](https://github.com/grafana/loki/pull/4190) **jeschkies**: Document known Docker driver issues. #### FluentD + * [4261](https://github.com/grafana/loki/pull/4261) **MrWong99**: FluentD output plugin: Remove an unused variable when processing chunks #### Docs + * [4646](https://github.com/grafana/loki/pull/4646) **KMiller-Grafana**: Docs: revise modes of operation section * [4631](https://github.com/grafana/loki/pull/4631) **kavirajk**: Add changelog and upgrade guide for #4556 * [4616](https://github.com/grafana/loki/pull/4616) **owen-d**: index-gw sts doc fix. closes #4583 @@ -1295,6 +1328,7 @@ Here is a list of all changes included in 2.4.0. * [3880](https://github.com/grafana/loki/pull/3880) **timothydlister**: Update fluent-plugin-loki documentation URLs #### Jsonnet + * [4629](https://github.com/grafana/loki/pull/4629) **owen-d**: Default wal to enabled in jsonnet lib * [4624](https://github.com/grafana/loki/pull/4624) **chaudum**: Disable chunk transfers in jsonnet lib * [4530](https://github.com/grafana/loki/pull/4530) **owen-d**: Jsonnet/overrides exporter @@ -1309,7 +1343,6 @@ Here is a list of all changes included in 2.4.0. * [4154](https://github.com/grafana/loki/pull/4154) **owen-d**: updates scheduler libsonnet * [4102](https://github.com/grafana/loki/pull/4102) **jeschkies**: Define ksonnet lib for query scheduler. - ### Notes This release was created from a branch starting at commit e95d193acf1633a6ec33a328b8a4a3d844e8e5f9 but it may also contain backported changes from main. @@ -1328,6 +1361,7 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana ### All Changes #### Loki + * [4048](https://github.com/grafana/loki/pull/4048) **dannykopping**: Ruler: implementing write relabelling on recording rule samples * [4091](https://github.com/grafana/loki/pull/4091) **cyriltovena**: Fixes instant queries in the frontend. * [4087](https://github.com/grafana/loki/pull/4087) **cyriltovena**: Fixes unaligned shards between ingesters and storage. 
@@ -1447,6 +1481,7 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana * [3050](https://github.com/grafana/loki/pull/3050) **cyriltovena**: first_over_time and last_over_time #### Docs + * [4031](https://github.com/grafana/loki/pull/4031) **KMiller-Grafana**: Docs: add weights to YAML metadata to order the LogQL subsections * [4029](https://github.com/grafana/loki/pull/4029) **bearice**: Docs: Update S3 permissions list * [4026](https://github.com/grafana/loki/pull/4026) **KMiller-Grafana**: Docs: correct fluentbit config value for DqueSync @@ -1526,6 +1561,7 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana * [3430](https://github.com/grafana/loki/pull/3430) **kavirajk**: doc(gcplog): Add note on scraping multiple GCP projects #### Promtail + * [4011](https://github.com/grafana/loki/pull/4011) **dannykopping**: Promtail: adding pipeline stage inspector * [4006](https://github.com/grafana/loki/pull/4006) **dannykopping**: Promtail: output timestamp with nanosecond precision in dry-run mode * [3971](https://github.com/grafana/loki/pull/3971) **cyriltovena**: Fixes negative gauge in Promtail. @@ -1538,15 +1574,15 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana * [3457](https://github.com/grafana/loki/pull/3457) **nmiculinic**: Promtail: Added path information to deleted tailed file * [3400](https://github.com/grafana/loki/pull/3400) **adityacs**: support max_message_length configuration for syslog parser - #### Logcli + * [3879](https://github.com/grafana/loki/pull/3879) **vyzigold**: logcli: Add retries to unsuccessful log queries * [3749](https://github.com/grafana/loki/pull/3749) **dbluxo**: logcli: add support for bearer token authentication * [3739](https://github.com/grafana/loki/pull/3739) **rsteneteg**: correct logcli instant query timestamp param name * [3678](https://github.com/grafana/loki/pull/3678) **cyriltovena**: Add the ability to wrap the roundtripper of the logcli client. - #### Build + * [4034](https://github.com/grafana/loki/pull/4034) **aknuds1**: loki-build-image: Fix building * [4028](https://github.com/grafana/loki/pull/4028) **aknuds1**: loki-build-image: Upgrade golangci-lint and Go * [4007](https://github.com/grafana/loki/pull/4007) **dannykopping**: Adding @grafana/loki-team as default CODEOWNERS @@ -1564,8 +1600,8 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana * [3615](https://github.com/grafana/loki/pull/3615) **slim-bean**: Remove codecov * [3481](https://github.com/grafana/loki/pull/3481) **slim-bean**: Update Go and Alpine versions - #### Jsonnet + * [4030](https://github.com/grafana/loki/pull/4030) **cyriltovena**: Improve the sweep lag panel in the retention dashboard. * [3917](https://github.com/grafana/loki/pull/3917) **jvrplmlmn**: refactor(production/ksonnet): Remove kausal from the root element * [3893](https://github.com/grafana/loki/pull/3893) **sandeepsukhani**: update uid of loki-deletion dashboard @@ -1580,6 +1616,7 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana * [3584](https://github.com/grafana/loki/pull/3584) **sandeepsukhani**: add loki resource usage dashboard for read and write path #### Project + * [3963](https://github.com/grafana/loki/pull/3963) **rfratto**: Remove Robert Fratto from list of team members * [3926](https://github.com/grafana/loki/pull/3926) **cyriltovena**: Add Danny Kopping to the Loki Team. 
* [3732](https://github.com/grafana/loki/pull/3732) **dannykopping**: Issue Templates: Improve wording and add warnings @@ -1590,6 +1627,7 @@ Release notes for 2.3.0 can be found on the [release notes page](https://grafana * [3630](https://github.com/grafana/loki/pull/3630) **slim-bean**: Re-license to AGPLv3 #### Docker-driver + * [3814](https://github.com/grafana/loki/pull/3814) **kavirajk**: Update the docker-driver doc about default labels * [3727](https://github.com/grafana/loki/pull/3727) **3Xpl0it3r**: docker-driver: remove duplicated code * [3709](https://github.com/grafana/loki/pull/3709) **cyriltovena**: Fixes docker driver that would panic when closed. @@ -1688,7 +1726,7 @@ TL;DR Loki 2.2 changes the internal chunk format which limits what versions you * [3237](https://github.com/grafana/loki/pull/3237) **cyriltovena**: Fixes unmarshalling of tailing responses. * [3236](https://github.com/grafana/loki/pull/3236) **slim-bean**: Loki: Log a crude lag metric for how far behind a client is. * [3234](https://github.com/grafana/loki/pull/3234) **cyriltovena**: Fixes previous commit not using the new sized body. -* [3233](https://github.com/grafana/loki/pull/3233) **cyriltovena**: Re-introduce https://github.com/grafana/loki/pull/3178. +* [3233](https://github.com/grafana/loki/pull/3233) **cyriltovena**: Re-introduce <https://github.com/grafana/loki/pull/3178>. * [3228](https://github.com/grafana/loki/pull/3228) **MichelHollands**: Add config endpoint * [3218](https://github.com/grafana/loki/pull/3218) **owen-d**: WAL backpressure * [3217](https://github.com/grafana/loki/pull/3217) **cyriltovena**: Rename checkpoint proto package to avoid conflict with cortex. @@ -1768,7 +1806,6 @@ TL;DR Loki 2.2 changes the internal chunk format which limits what versions you * [3270](https://github.com/grafana/loki/pull/3270) **chancez**: logcli: Fix handling of logcli query using --since/--from and --tail * [3229](https://github.com/grafana/loki/pull/3229) **dethi**: logcli: support --include-label when not using --tail - #### Jsonnet * [3447](https://github.com/grafana/loki/pull/3447) **owen-d**: Use better memory metric on operational dashboard @@ -1796,13 +1833,13 @@ TL;DR Loki 2.2 changes the internal chunk format which limits what versions you * [3240](https://github.com/grafana/loki/pull/3240) **sbaier1**: fix fluent-bit output plugin generating invalid JSON - #### Docker Logging Driver * [3331](https://github.com/grafana/loki/pull/3331) **cyriltovena**: Add pprof endpoint to docker-driver.
* [3225](https://github.com/grafana/loki/pull/3225) **Le0tk0k**: (fix: cmd/docker-driver): Insert a space in the error message #### Docs + * [5934](https://github.com/grafana/loki/pull/5934) **johgsc**: Docs: revise modes of operation section * [3437](https://github.com/grafana/loki/pull/3437) **caleb15**: docs: add note about regex * [3421](https://github.com/grafana/loki/pull/3421) **kavirajk**: doc(gcplog): Advanced log export filter example @@ -1865,13 +1902,11 @@ TL;DR Loki 2.2 changes the internal chunk format which limits what versions you * [3031](https://github.com/grafana/loki/pull/3031) **AdamKorcz**: Testing: Introduced continuous fuzzing * [3006](https://github.com/grafana/loki/pull/3006) **huikang**: Fix the docker image version in compose deployment - #### Tooling * [3377](https://github.com/grafana/loki/pull/3377) **slim-bean**: Tooling: Update chunks-inspect to understand the new chunk format as well as new compression algorithms * [3151](https://github.com/grafana/loki/pull/3151) **slim-bean**: Loki migrate-tool - ### Notes This release was created from revision 8012362674568379a3871ff8c4a2bfd1ddba7ad1 (Which was PR 3460) @@ -1881,7 +1916,6 @@ This release was created from revision 8012362674568379a3871ff8c4a2bfd1ddba7ad1 * Go Version: 1.15.3 * Cortex Version: 7dac81171c665be071bd167becd1f55528a9db32 - ## 2.1.0 (2020/12/23) Happy Holidays from the Loki team! Please enjoy a new Loki release to welcome in the New Year! @@ -1890,15 +1924,15 @@ Happy Holidays from the Loki team! Please enjoy a new Loki release to welcome in ### Notable changes -#### Helm users read this! +#### Helm users read this The Helm charts have moved! * [2720](https://github.com/grafana/loki/pull/2720) **torstenwalter**: Deprecate Charts as they have been moved -This was done to consolidate Grafana's helm charts for all Grafana projects in one place: https://github.com/grafana/helm-charts/ +This was done to consolidate Grafana's helm charts for all Grafana projects in one place: <https://github.com/grafana/helm-charts/> -**From now moving forward, please use the new Helm repo url: https://grafana.github.io/helm-charts** +**From now moving forward, please use the new Helm repo url: <https://grafana.github.io/helm-charts>** The charts in the Loki repo will soon be removed so please update your Helm repo to the new URL and submit your PR's over there as well @@ -1906,7 +1940,7 @@ Special thanks to @torstenwalter, @unguiculus, and @scottrigby for their initiat Also go check out the microservices helm chart contributed by @unguiculus in the new repo! -#### Fluent bit plugin users read this! +#### Fluent bit plugin users read this Fluent bit officially supports Loki as an output plugin now! WoooHOOO! @@ -1960,7 +1994,6 @@ A number of performance and resource improvements have been made as well! * [2959](https://github.com/grafana/loki/pull/2959) **cyriltovena**: Improve tailer matcher function. * [2876](https://github.com/grafana/loki/pull/2876) **jkellerer**: LogQL: Add unwrap bytes() conversion function - #### Notable mentions Thanks to @timbyr for adding an often requested feature, the ability to support environment variable expansion in config files!
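For anyone wondering what that looks like in practice, here is a minimal sketch. It assumes the feature is switched on via Loki's `-config.expand-env=true` flag; the storage fields and variable names below are purely illustrative, not a prescription:

```yaml
# Start Loki with something like:
#   loki -config.file=loki.yaml -config.expand-env=true
# ${VAR} references are then substituted from the environment at config
# load time, keeping secrets out of the config file itself.
storage_config:
  aws:
    access_key_id: ${AWS_ACCESS_KEY_ID}
    secret_access_key: ${AWS_SECRET_ACCESS_KEY}
```

The same file still works without expansion enabled, so it is a low-risk way to share one config across environments.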
@@ -1974,6 +2007,7 @@ Thanks to @huikang for adding a new docker-compose file for running Loki as micr ### All Changes #### Loki + * [2988](https://github.com/grafana/loki/pull/2988) **slim-bean**: Loki: handle faults when opening boltdb files * [2984](https://github.com/grafana/loki/pull/2984) **owen-d**: adds the ability to read chunkFormatV3 while writing v2 * [2983](https://github.com/grafana/loki/pull/2983) **slim-bean**: Loki: recover from panic opening boltdb files @@ -2000,6 +2034,7 @@ Thanks to @huikang for adding a new docker-compose file for running Loki as micr * [2751](https://github.com/grafana/loki/pull/2751) **jeschkies**: Logging: Log throughput and total bytes human readable. #### Helm + * [2986](https://github.com/grafana/loki/pull/2986) **cyriltovena**: Move CI to helm3. * [2967](https://github.com/grafana/loki/pull/2967) **czunker**: Remove `helm init` * [2965](https://github.com/grafana/loki/pull/2965) **czunker**: [Helm Chart Loki] Add needed k8s objects for alerting config @@ -2009,6 +2044,7 @@ Thanks to @huikang for adding a new docker-compose file for running Loki as micr * [2651](https://github.com/grafana/loki/pull/2651) **scottrigby**: helm chart: Fix broken logo #### Jsonnet + * [2976](https://github.com/grafana/loki/pull/2976) **beorn7**: Improve promtail alerts to retain the namespace label * [2961](https://github.com/grafana/loki/pull/2961) **sandeepsukhani**: add missing ingester query routes in loki reads and operational dashboard * [2899](https://github.com/grafana/loki/pull/2899) **halcyondude**: gateway: fix regression in tanka jsonnet @@ -2017,8 +2053,8 @@ Thanks to @huikang for adding a new docker-compose file for running Loki as micr * [2820](https://github.com/grafana/loki/pull/2820) **owen-d**: fixes promtail libsonnet tag. closes #2818 * [2718](https://github.com/grafana/loki/pull/2718) **halcyondude**: parameterize PVC storage class (ingester, querier, compactor) - #### Docs + * [2969](https://github.com/grafana/loki/pull/2969) **simonswine**: Add community forum to README.md * [2968](https://github.com/grafana/loki/pull/2968) **yuichi10**: logcli: Fix logcli logql document URL * [2942](https://github.com/grafana/loki/pull/2942) **hedss**: Docs: Corrects Fluent Bit documentation link to build the plugin. @@ -2039,27 +2075,30 @@ Thanks to @huikang for adding a new docker-compose file for running Loki as micr * [2636](https://github.com/grafana/loki/pull/2636) **LTek-online**: promtail documentation: changing the headers of the configuration docu to reflect configuration code #### Promtail + * [2957](https://github.com/grafana/loki/pull/2957) **slim-bean**: Promtail: Update debian image and use a newer libsystemd * [2928](https://github.com/grafana/loki/pull/2928) **cyriltovena**: Skip journald bad message. * [2914](https://github.com/grafana/loki/pull/2914) **chancez**: promtail: Add support for using syslog message timestamp * [2910](https://github.com/grafana/loki/pull/2910) **rfratto**: Expose underlying promtail client - #### Logcli + * [2948](https://github.com/grafana/loki/pull/2948) **tomwilkie**: Add a few more instructions to logcli --help. 
#### Build + * [2877](https://github.com/grafana/loki/pull/2877) **cyriltovena**: Update to go 1.15 * [2814](https://github.com/grafana/loki/pull/2814) **torkelo**: Stats: Adding metrics collector GitHub action #### Fluentd + * [2825](https://github.com/grafana/loki/pull/2825) **cyriltovena**: Bump fluentd plugin * [2434](https://github.com/grafana/loki/pull/2434) **andsens**: fluent-plugin: Improve escaping in key_value format - ### Notes This release was created from revision ae9c4b82ec4a5d21267da50d6a1a8170e0ef82ff (Which was PR 2960) and the following PR's were cherry-picked + * [2984](https://github.com/grafana/loki/pull/2984) **owen-d**: adds the ability to read chunkFormatV3 while writing v2 * [2974](https://github.com/grafana/loki/pull/2974) **hedss**: fluent-bit: Rename Fluent Bit plugin output name. @@ -2100,7 +2139,7 @@ Thanks again for the many incredible contributions and improvements from the won Check the [upgrade guide](https://github.com/grafana/loki/blob/master/docs/sources/setup/upgrade/_index.md#200) for detailed information on all these changes. -### 2.0!!!! +### 2.0 There are too many PR's to list individually for the major improvements which we thought justified a 2.0 but here is the high level: @@ -2127,9 +2166,10 @@ Thank you @dlemel8 for this PR! Now you can start Loki with `-verify-config` to ### All Changes #### Loki + * [2804](https://github.com/grafana/loki/pull/2804) **slim-bean**: Loki: log any chunk fetch failure * [2803](https://github.com/grafana/loki/pull/2803) **slim-bean**: Update local and docker default config files to use boltdb-shipper with a few other config changes -* [2796](https://github.com/grafana/loki/pull/2796) **cyriltovena**: Fixes a bug that would add __error__ label incorrectly. +* [2796](https://github.com/grafana/loki/pull/2796) **cyriltovena**: Fixes a bug that would add `__error__` label incorrectly. * [2793](https://github.com/grafana/loki/pull/2793) **cyriltovena**: Improve the way we reverse iterator for backward queries. * [2790](https://github.com/grafana/loki/pull/2790) **sandeepsukhani**: Boltdb shipper metrics changes * [2788](https://github.com/grafana/loki/pull/2788) **sandeepsukhani**: add a metric in compactor to record timestamp of last successful run @@ -2193,6 +2233,7 @@ Thank you @dlemel8 for this PR! Now you can start Loki with `-verify-config` to * [2487](https://github.com/grafana/loki/pull/2487) **sandeepsukhani**: upload boltdb files from shipper only when they are not expected to be modified or during shutdown #### Docs + * [2797](https://github.com/grafana/loki/pull/2797) **cyriltovena**: Logqlv2 docs * [2772](https://github.com/grafana/loki/pull/2772) **DesistDaydream**: reapir Retention Example Configuration * [2762](https://github.com/grafana/loki/pull/2762) **PabloCastellano**: fix: typo in upgrade.md @@ -2234,18 +2275,22 @@ Thank you @dlemel8 for this PR!
Now you can start Loki with `-verify-config` to * [2500](https://github.com/grafana/loki/pull/2500) **oddlittlebird**: Docs: Update README.md #### Helm + * [2746](https://github.com/grafana/loki/pull/2746) **marcosartori**: helm/fluentbit K8S-Logging.Exclude & and Mem_Buf_Limit toggle * [2742](https://github.com/grafana/loki/pull/2742) **steven-sheehy**: Fix linting errors and use of deprecated repositories * [2659](https://github.com/grafana/loki/pull/2659) **rskrishnar**: [Promtail] enables configuring psp in helm chart * [2554](https://github.com/grafana/loki/pull/2554) **alexandre-allard-scality**: production/helm: add support for PV selector in Loki statefulset #### FluentD + * [2739](https://github.com/grafana/loki/pull/2739) **jgehrcke**: FluentD loki plugin: add support for bearer_token_file parameter #### Fluent Bit + * [2568](https://github.com/grafana/loki/pull/2568) **zjj2wry**: fluent-bit plugin support TLS #### Promtail + * [2723](https://github.com/grafana/loki/pull/2723) **carlpett**: Promtail: Add counter promtail_batch_retries_total * [2717](https://github.com/grafana/loki/pull/2717) **slim-bean**: Promtail: Fix deadlock on tailer shutdown. * [2710](https://github.com/grafana/loki/pull/2710) **slim-bean**: Promtail: (and also fluent-bit) change the max batch size to 1MB @@ -2262,6 +2307,7 @@ Thank you @dlemel8 for this PR! Now you can start Loki with `-verify-config` to * [2532](https://github.com/grafana/loki/pull/2532) **slim-bean**: Promtail: Restart the tailer if we fail to read and upate current position #### Ksonnet + * [2719](https://github.com/grafana/loki/pull/2719) **halcyondude**: nit: fix formatting for ksonnet/loki * [2677](https://github.com/grafana/loki/pull/2677) **sandeepsukhani**: fix jsonnet for memcached-writes when using boltdb-shipper * [2617](https://github.com/grafana/loki/pull/2617) **periklis**: Add config options for loki dashboards @@ -2276,15 +2322,18 @@ Thank you @dlemel8 for this PR! Now you can start Loki with `-verify-config` to * [2494](https://github.com/grafana/loki/pull/2494) **primeroz**: Jsonnet Promtail: Change function for mounting configmap in promtail daemonset #### Logstash + * [2607](https://github.com/grafana/loki/pull/2607) **adityacs**: Logstash cpu usage fix #### Build + * [2602](https://github.com/grafana/loki/pull/2602) **sandeepsukhani**: add support for building querytee * [2561](https://github.com/grafana/loki/pull/2561) **tharun208**: Added logcli docker image * [2549](https://github.com/grafana/loki/pull/2549) **simnv**: Ignore .exe files build for Windows * [2527](https://github.com/grafana/loki/pull/2527) **owen-d**: Update docker-compose.yaml to use 1.6.0 #### Docker Logging Driver + * [2459](https://github.com/grafana/loki/pull/2459) **RaitoBezarius**: Docker logging driver: Add a keymod for the extra attributes from the Docker logging driver ### Dependencies @@ -2378,7 +2427,6 @@ If you are using the query-frontend: * [2336](https://github.com/grafana/loki/pull/2336) provides two new flags that will print the entire Loki config object at startup. Be warned there are a lot of config options, and many won’t apply to your setup (such as storage configs you aren’t using), but this can be a really useful tool when troubleshooting. Sticking with the theme of best for last, * [2224](https://github.com/grafana/loki/pull/2224) and [2288](https://github.com/grafana/loki/pull/2288) improve support for running Loki with a shared Ring using memberlist while not requiring Consul or Etcd. 
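To make the memberlist note concrete, here is a minimal sketch of a ring backed by gossip rather than an external key-value store. The field layout follows the Loki configuration schema of this era, but the Kubernetes service name is hypothetical and the snippet is illustrative, not a definitive setup:

```yaml
# Ingesters gossip ring state to each other over memberlist,
# so no Consul or Etcd deployment is required.
memberlist:
  join_members:
    - loki-gossip-ring.default.svc.cluster.local:7946
ingester:
  lifecycler:
    ring:
      kvstore:
        store: memberlist
```

A headless service (or any DNS name resolving to the Loki pods) is typically what `join_members` points at, so members can discover each other on startup.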
We need to follow up soon with some better documentation or a blog post on this! - ### Dependencies * Go Version: 1.14.2 @@ -2387,6 +2435,7 @@ If you are using the query-frontend: ### All Changes #### Loki + * [2484](https://github.com/grafana/loki/pull/2484) **slim-bean**: Loki: fix batch iterator error when all chunks overlap and chunk time ranges are greater than query time range * [2483](https://github.com/grafana/loki/pull/2483) **sandeepsukhani**: download boltdb files parallelly during reads * [2472](https://github.com/grafana/loki/pull/2472) **owen-d**: series endpoint uses normal splits @@ -2442,6 +2491,7 @@ If you are using the query-frontend: * [2032](https://github.com/grafana/loki/pull/2032) **tivvit**: Added support for tail to query frontend #### Promtail + * [2496](https://github.com/grafana/loki/pull/2496) **slim-bean**: Promtail: Drop stage * [2475](https://github.com/grafana/loki/pull/2475) **slim-bean**: Promtail: force the log level on any Loki Push API target servers to match Promtail's log level. * [2474](https://github.com/grafana/loki/pull/2474) **slim-bean**: Promtail: use --client.external-labels for all clients @@ -2466,6 +2516,7 @@ If you are using the query-frontend: * [2087](https://github.com/grafana/loki/pull/2087) **adityacs**: Set JournalTarget Priority value to keyword #### Logcli + * [2497](https://github.com/grafana/loki/pull/2497) **slim-bean**: logcli: adds --analyize-labels to logcli series command and changes how labels are provided to the command * [2482](https://github.com/grafana/loki/pull/2482) **slim-bean**: Logcli: automatically batch requests * [2470](https://github.com/grafana/loki/pull/2470) **adityacs**: colored labels output for logcli @@ -2474,10 +2525,11 @@ If you are using the query-frontend: * [2083](https://github.com/grafana/loki/pull/2083) **adityacs**: Support querying labels on time range in logcli #### Docs + * [2473](https://github.com/grafana/loki/pull/2473) **owen-d**: fixes lambda-promtail relative doc link * [2454](https://github.com/grafana/loki/pull/2454) **oddlittlebird**: Create CODEOWNERS * [2439](https://github.com/grafana/loki/pull/2439) **till**: Docs: updated "Upgrading" for docker driver -* [2437](https://github.com/grafana/loki/pull/2437) **wardbekker**: DOCS: clarified globbing behaviour of __path__ of the doublestar library +* [2437](https://github.com/grafana/loki/pull/2437) **wardbekker**: DOCS: clarified globbing behaviour of `__path__` of the doublestar library * [2431](https://github.com/grafana/loki/pull/2431) **endu**: fix dead link * [2425](https://github.com/grafana/loki/pull/2425) **RichiH**: Change conduct contact email address * [2420](https://github.com/grafana/loki/pull/2420) **petuhovskiy**: Fix docker driver doc @@ -2524,9 +2576,11 @@ If you are using the query-frontend: * [2092](https://github.com/grafana/loki/pull/2092) **i-takizawa**: docs: make visible #### Build + * [2467](https://github.com/grafana/loki/pull/2467) **slim-bean**: Update Loki build image #### Ksonnet + * [2460](https://github.com/grafana/loki/pull/2460) **Duologic**: refactor: use $.core.v1.envVar * [2452](https://github.com/grafana/loki/pull/2452) **slim-bean**: ksonnet: Reduce querier parallelism to a more sane default value and remove the default setting for storage_backend * [2377](https://github.com/grafana/loki/pull/2377) **Duologic**: refactor: moved jaeger-agent-mixin @@ -2536,6 +2590,7 @@ If you are using the query-frontend: * [2091](https://github.com/grafana/loki/pull/2091) **beorn7**: Keep scrape config in line
with the new Prometheus scrape config #### Docker logging driver + * [2435](https://github.com/grafana/loki/pull/2435) **cyriltovena**: Add more precisions on the docker driver installed on the daemon. * [2343](https://github.com/grafana/loki/pull/2343) **jdfalk**: loki-docker-driver: Change "ignoring empty line" to debug logging * [2295](https://github.com/grafana/loki/pull/2295) **cyriltovena**: Remove mount in the docker driver. @@ -2543,11 +2598,13 @@ If you are using the query-frontend: * [2116](https://github.com/grafana/loki/pull/2116) **cyriltovena**: Allows to change the log driver mode and buffer size. #### Logstash output plugin + * [2415](https://github.com/grafana/loki/pull/2415) **cyriltovena**: Set service values via --set for logstash. * [2410](https://github.com/grafana/loki/pull/2410) **adityacs**: logstash code refactor and doc improvements * [1822](https://github.com/grafana/loki/pull/1822) **adityacs**: Loki Logstash Plugin #### Loki canary + * [2413](https://github.com/grafana/loki/pull/2413) **slim-bean**: Loki-Canary: Backoff retries on query failures, add histograms for query performance. * [2369](https://github.com/grafana/loki/pull/2369) **slim-bean**: Loki Canary: One more round of improvements to query for missing websocket entries up to max-wait * [2350](https://github.com/grafana/loki/pull/2350) **slim-bean**: Canary tweaks @@ -2555,12 +2612,14 @@ If you are using the query-frontend: * [2259](https://github.com/grafana/loki/pull/2259) **ombre8**: Canary: make stream configurable #### Fluentd + * [2407](https://github.com/grafana/loki/pull/2407) **cyriltovena**: bump fluentd version to release a new gem. * [2399](https://github.com/grafana/loki/pull/2399) **tarokkk**: fluentd: Make fluentd version requirements permissive * [2179](https://github.com/grafana/loki/pull/2179) **takanabe**: Improve fluentd plugin development experience * [2171](https://github.com/grafana/loki/pull/2171) **takanabe**: Add server TLS certificate verification #### Fluent Bit + * [2375](https://github.com/grafana/loki/pull/2375) **cyriltovena**: Fixes the fluentbit batchwait backward compatiblity. * [2367](https://github.com/grafana/loki/pull/2367) **dojci**: fluent-bit: Add more loki client configuration options * [2365](https://github.com/grafana/loki/pull/2365) **dojci**: fluent-bit: Fix fluent-bit exit callback when buffering is enabled @@ -2570,6 +2629,7 @@ If you are using the query-frontend: * [2089](https://github.com/grafana/loki/pull/2089) **FrederikNS**: Allow configuring more options for output configuration #### Helm + * [2406](https://github.com/grafana/loki/pull/2406) **steven-sheehy**: Helm: Fix regression in chart name * [2379](https://github.com/grafana/loki/pull/2379) **StevenReitsma**: production/helm: Add emptyDir volume type to promtail PSP * [2366](https://github.com/grafana/loki/pull/2366) **StevenReitsma**: production/helm: Add projected and downwardAPI volume types to PodSecurityPolicy (#2355) @@ -2584,6 +2644,7 @@ If you are using the query-frontend: * [2091](https://github.com/grafana/loki/pull/2091) **beorn7**: Keep scrape config in line with the new Prometheus scrape config #### Build + * [2371](https://github.com/grafana/loki/pull/2371) **cyriltovena**: Fixes helm publish that needs now to add repo. * [2341](https://github.com/grafana/loki/pull/2341) **slim-bean**: Build: Fix CI helm test * [2309](https://github.com/grafana/loki/pull/2309) **cyriltovena**: Test again arm32 on internal ci. 
@@ -2591,7 +2652,6 @@ If you are using the query-frontend: * [2287](https://github.com/grafana/loki/pull/2287) **wardbekker**: Change the Grafana image to latest * [2212](https://github.com/grafana/loki/pull/2212) **roidelapluie**: Remove unhelpful/problematic term in circleci.yml - ## 1.5.0 (2020-05-20) It's been a busy month and a half since 1.4.0 was released, and a lot of new improvements have been added to Loki since! @@ -2749,6 +2809,7 @@ We now GPG sign helm packages! * [1706](https://github.com/grafana/loki/pull/1706) **cyriltovena**: Non-root user docker image for Loki. #### Logcli + * [2027](https://github.com/grafana/loki/pull/2027) **pstibrany**: logcli: Query needs to be stored into url.RawQuery, and not url.Path * [2000](https://github.com/grafana/loki/pull/2000) **cyriltovena**: Improve URL building in the logcli to strip trailing /. * [1922](https://github.com/grafana/loki/pull/1922) **bavarianbidi**: logcli: org-id/tls-skip-verify set via env var @@ -2758,6 +2819,7 @@ We now GPG sign helm packages! * [1712](https://github.com/grafana/loki/pull/1712) **rfratto**: clarify logcli commands and output #### Promtail + * [2069](https://github.com/grafana/loki/pull/2069) **slim-bean**: Promtail: log at debug level when nothing matches the specified path for a file target * [2066](https://github.com/grafana/loki/pull/2066) **slim-bean**: Promtail: metrics stage can also count line bytes * [2049](https://github.com/grafana/loki/pull/2049) **adityacs**: Fix promtail client default values @@ -2772,12 +2834,14 @@ We now GPG sign helm packages! * [1627](https://github.com/grafana/loki/pull/1627) **rfratto**: Proposal: Promtail Push API #### Docker Driver + * [2076](https://github.com/grafana/loki/pull/2076) **cyriltovena**: Allows to pass inlined pipeline stages to the docker driver. * [2054](https://github.com/grafana/loki/pull/2054) **bkmit**: Docker driver: Allow to provision external pipeline files to plugin * [1906](https://github.com/grafana/loki/pull/1906) **cyriltovena**: Add no-file and keep-file log option for docker driver. * [1903](https://github.com/grafana/loki/pull/1903) **cyriltovena**: Log docker driver config map. #### Fluentd + * [2074](https://github.com/grafana/loki/pull/2074) **osela**: fluentd plugin: support placeholders in tenant field * [2006](https://github.com/grafana/loki/pull/2006) **Skeen**: fluent-plugin-loki: Restructuring and CI * [1909](https://github.com/grafana/loki/pull/1909) **jgehrcke**: fluentd loki plugin README: add note about labels @@ -2785,15 +2849,18 @@ We now GPG sign helm packages! * [1811](https://github.com/grafana/loki/pull/1811) **JamesJJ**: Error handling: Show data stream at "debug" level, not "warn" #### Fluent Bit + * [2040](https://github.com/grafana/loki/pull/2040) **avii-ridge**: Add extraOutputs variable to support multiple outputs for fluent-bit * [1915](https://github.com/grafana/loki/pull/1915) **DirtyCajunRice**: Fix fluent-bit metrics * [1890](https://github.com/grafana/loki/pull/1890) **dottedmag**: fluentbit: JSON encoding: avoid base64 encoding of []byte inside other slices * [1791](https://github.com/grafana/loki/pull/1791) **cyriltovena**: Improve fluentbit logfmt. #### Ksonnet + * [1980](https://github.com/grafana/loki/pull/1980) **cyriltovena**: Log slow query from the frontend by default in ksonnet. 
##### Mixins + * [2080](https://github.com/grafana/loki/pull/2080) **beorn7**: mixin: Accept suffixes to pod name in instance labels * [2044](https://github.com/grafana/loki/pull/2044) **slim-bean**: Dashboards: fixes the cpu usage graphs * [2043](https://github.com/grafana/loki/pull/2043) **joe-elliott**: Swapped to container restarts over terminated reasons @@ -2802,6 +2869,7 @@ We now GPG sign helm packages! * [1913](https://github.com/grafana/loki/pull/1913) **tomwilkie**: s/dashboards/grafanaDashboards. #### Helm + * [2038](https://github.com/grafana/loki/pull/2038) **oke-py**: Docs: update Loki Helm Chart document to support Helm 3 * [2015](https://github.com/grafana/loki/pull/2015) **etashsingh**: Change image tag from 1.4.1 to 1.4.0 in Helm chart * [1981](https://github.com/grafana/loki/pull/1981) **sshah90**: added extraCommandlineArgs in values file @@ -2813,9 +2881,11 @@ We now GPG sign helm packages! * [1817](https://github.com/grafana/loki/pull/1817) **bclermont**: Helm chart: Prevent prometheus to scrape both services #### Loki Canary + * [1891](https://github.com/grafana/loki/pull/1891) **joe-elliott**: Addition of a `/suspend` endpoint to Loki Canary #### Docs + * [2056](https://github.com/grafana/loki/pull/2056) **cyriltovena**: Update api.md * [2014](https://github.com/grafana/loki/pull/2014) **jsoref**: Spelling * [1999](https://github.com/grafana/loki/pull/1999) **oddlittlebird**: Docs: Added labels content @@ -2836,6 +2906,7 @@ We now GPG sign helm packages! * [1843](https://github.com/grafana/loki/pull/1843) **vishesh92**: Docs: Update configuration docs for redis #### Build + * [2042](https://github.com/grafana/loki/pull/2042) **rfratto**: Fix drone * [2009](https://github.com/grafana/loki/pull/2009) **cyriltovena**: Adds :delegated flags to speed up build experience on MacOS. * [1942](https://github.com/grafana/loki/pull/1942) **owen-d**: delete tag script filters by prefix instead of substring @@ -2843,7 +2914,6 @@ We now GPG sign helm packages! * [1911](https://github.com/grafana/loki/pull/1911) **slim-bean**: build: push images for `k` branches * [1849](https://github.com/grafana/loki/pull/1849) **cyriltovena**: Pin helm version in circle-ci helm testing workflow. - ## 1.4.1 (2020-04-06) We realized after the release last week that piping data into promtail was not working on Linux or Windows, this should fix this issue for both platforms: @@ -2942,6 +3012,7 @@ The second place would be the log file itself. At some point, most log files rol There are many other important fixes and improvements to Loki, way too many to call out in individual detail, so take a look! #### Loki + * [1810](https://github.com/grafana/loki/pull/1810) **cyriltovena**: Optimize empty filter queries. 
* [1809](https://github.com/grafana/loki/pull/1809) **cyriltovena**: Test stats memchunk * [1807](https://github.com/grafana/loki/pull/1807) **pracucci**: Enable global limits by default in production mixin @@ -2989,6 +3060,7 @@ There are many other important fixes and improvements to Loki, way too many to c * [1484](https://github.com/grafana/loki/pull/1484) **pstibrany**: loki: use new runtimeconfig package from Cortex #### Promtail + * [1840](https://github.com/grafana/loki/pull/1840) **slim-bean**: promtail: Retry 429 rate limit errors from Loki, increase default retry limits * [1775](https://github.com/grafana/loki/pull/1775) **slim-bean**: promtail: remove the read lines counter when the log file stops being tailed * [1770](https://github.com/grafana/loki/pull/1770) **adityacs**: Fix single job with multiple service discovery elements @@ -3005,6 +3077,7 @@ There are many other important fixes and improvements to Loki, way too many to c * [1602](https://github.com/grafana/loki/pull/1602) **slim-bean**: Improve promtail configuration docs #### Helm + * [1731](https://github.com/grafana/loki/pull/1731) **billimek**: [promtail helm chart] - Expand promtail syslog svc to support values * [1688](https://github.com/grafana/loki/pull/1688) **fredgate**: Loki stack helm chart can deploy datasources without Grafana * [1632](https://github.com/grafana/loki/pull/1632) **lukipro**: Added support for imagePullSecrets in Loki Helm chart @@ -3017,6 +3090,7 @@ There are many other important fixes and improvements to Loki, way too many to c * [1529](https://github.com/grafana/loki/pull/1529) **tourea**: Promtail Helm Chart: Add support for passing environment variables #### Jsonnet + * [1776](https://github.com/grafana/loki/pull/1776) **Eraac**: fix typo: Not a binary operator: = * [1767](https://github.com/grafana/loki/pull/1767) **joe-elliott**: Dashboard Cleanup * [1766](https://github.com/grafana/loki/pull/1766) **joe-elliott**: Move dashboards out into their own json files @@ -3033,10 +3107,12 @@ There are many other important fixes and improvements to Loki, way too many to c * [1613](https://github.com/grafana/loki/pull/1613) **cyriltovena**: Fixes config change in the result cache #### Fluent Bit + * [1791](https://github.com/grafana/loki/pull/1791) **cyriltovena**: Improve fluentbit logfmt. * [1717](https://github.com/grafana/loki/pull/1717) **adityacs**: Fluent-bit: Fix panic error when AutoKubernetesLabels is true #### Fluentd + * [1811](https://github.com/grafana/loki/pull/1811) **JamesJJ**: Error handling: Show data stream at "debug" level, not "warn" * [1728](https://github.com/grafana/loki/pull/1728) **irake99**: docs: fix outdated link to fluentd * [1703](https://github.com/grafana/loki/pull/1703) **Skeen**: fluent-plugin-grafana-loki: Update fluentd base image to current images (edge) @@ -3045,6 +3121,7 @@ There are many other important fixes and improvements to Loki, way too many to c * [1603](https://github.com/grafana/loki/pull/1603) **tarokkk**: fluentd-plugin: add URI validation #### Docs + * [1781](https://github.com/grafana/loki/pull/1781) **candlerb**: Docs: Recommended schema is now v11 * [1771](https://github.com/grafana/loki/pull/1771) **rfratto**: change slack url to slack.grafana.com and use https * [1738](https://github.com/grafana/loki/pull/1738) **jgehrcke**: docs: observability.md: clarify lines vs. 
entries @@ -3067,6 +3144,7 @@ There are many other important fixes and improvements to Loki, way too many to c * [1504](https://github.com/grafana/loki/pull/1504) **hsraju**: Updated configuration.md #### Logcli + * [1808](https://github.com/grafana/loki/pull/1808) **slim-bean**: logcli: log the full stats and send to stderr instead of stdout * [1682](https://github.com/grafana/loki/pull/1682) **adityacs**: BugFix: Fix logcli --quiet parameter parsing issue * [1644](https://github.com/grafana/loki/pull/1644) **cyriltovena**: This improves the log output for statistics in the logcli. @@ -3074,10 +3152,12 @@ There are many other important fixes and improvements to Loki, way too many to c * [1573](https://github.com/grafana/loki/pull/1573) **cyriltovena**: Improve logql query statistics collection. #### Loki Canary + * [1653](https://github.com/grafana/loki/pull/1653) **slim-bean**: Canary needs its logo * [1581](https://github.com/grafana/loki/pull/1581) **slim-bean**: Add sleep to canary reconnect on error #### Build + * [1780](https://github.com/grafana/loki/pull/1780) **slim-bean**: build: Update the CD deploy task name * [1762](https://github.com/grafana/loki/pull/1762) **dgzlopes**: Bump testify to 1.5.1 * [1742](https://github.com/grafana/loki/pull/1742) **slim-bean**: build: fix deploy on tagged build @@ -3095,6 +3175,7 @@ There are many other important fixes and improvements to Loki, way too many to c * [1600](https://github.com/grafana/loki/pull/1600) **mattmendick**: Codecov circleci test [WIP] #### Tooling + * [1577](https://github.com/grafana/loki/pull/1577) **pstibrany**: Move chunks-inspect tool to Loki repo ## 1.3.0 (2020-01-16) @@ -3139,7 +3220,7 @@ And last but not least on the notable changes list is a new feature for Promtail With this change Promtail can receive syslogs via TCP! Thanks to @bastjan for all the hard work on this submission! -### Important things to note: +### Important things to note * [1519](https://github.com/grafana/loki/pull/1519) Changes a core behavior in Loki regarding logs with duplicate content AND duplicate timestamps, previously Loki would store logs with duplicate timestamps and content, moving forward logs with duplicate content AND timestamps will be silently ignored. Mainly this change is to prevent duplicates that appear when a batch is retried (the first entry in the list would be inserted again, now it will be ignored). Logs with the same timestamp and different content will still be accepted. * [1486](https://github.com/grafana/loki/pull/1486) Deprecated `-distributor.limiter-reload-period` flag / distributor's `limiter_reload_period` config option. @@ -3149,6 +3230,7 @@ With this change Promtail can receive syslogs via TCP! Thanks to @bastjan for a Once again we can't thank our community and contributors enough for the significant work that everyone is adding to Loki, the entire list of changes is long!! #### Loki + * [1526](https://github.com/grafana/loki/pull/1526) **codesome**: Support for aggregation * [1522](https://github.com/grafana/loki/pull/1522) **cyriltovena**: Adds support for the old query string regexp in the frontend. 
* [1519](https://github.com/grafana/loki/pull/1519) **rfratto**: pkg/chunkenc: ignore duplicate lines pushed to a stream @@ -3186,6 +3268,7 @@ Once again we can't thank our community and contributors enough for the signific * [1541](https://github.com/grafana/loki/pull/1541) **owen-d**: legacy endpoint 400s metric queries #### Promtail + * [1515](https://github.com/grafana/loki/pull/1515) **slim-bean**: Promtail: Improve position and size metrics * [1485](https://github.com/grafana/loki/pull/1485) **p37ruh4**: Fileglob parsing fixes * [1472](https://github.com/grafana/loki/pull/1472) **owen-d**: positions.ignore-corruptions @@ -3196,21 +3279,26 @@ Once again we can't thank our community and contributors enough for the signific * [1275](https://github.com/grafana/loki/pull/1275) **bastjan**: pkg/promtail: IETF Syslog (RFC5424) Support #### Fluent Bit + * [1455](https://github.com/grafana/loki/pull/1455) **JensErat**: fluent-bit-plugin: re-enable failing JSON marshaller tests; pass error instead of logging and ignoring * [1294](https://github.com/grafana/loki/pull/1294) **JensErat**: fluent-bit: multi-instance support * [1514](https://github.com/grafana/loki/pull/1514) **shane-axiom**: fluent-plugin-grafana-loki: Add `fluentd_thread` label when `flush_thread_count` > 1 #### Fluentd + * [1500](https://github.com/grafana/loki/pull/1500) **cyriltovena**: Bump fluentd plugin to 1.2.6. * [1475](https://github.com/grafana/loki/pull/1475) **Horkyze**: fluentd-plugin: call gsub for strings only #### Docker Driver + * [1414](https://github.com/grafana/loki/pull/1414) **cyriltovena**: Adds tenant-id for docker driver. #### Logcli -* [1492](https://github.com/grafana/loki/pull/1492) **sandlis**: logcli: replaced GRAFANA_* with LOKI_* in logcli env vars, set default server url for logcli to localhost + +* [1492](https://github.com/grafana/loki/pull/1492) **sandlis**: logcli: replaced GRAFANA_* with LOKI_* in logcli env vars, set default server url for logcli to localhost #### Helm + * [1534](https://github.com/grafana/loki/pull/1534) **olivierboudet**: helm : fix fluent-bit parser configuration syntax * [1506](https://github.com/grafana/loki/pull/1506) **terjesannum**: helm: add podsecuritypolicy for fluent-bit * [1431](https://github.com/grafana/loki/pull/1431) **eugene100**: Helm: fix issue with config.clients @@ -3220,6 +3308,7 @@ Once again we can't thank our community and contributors enough for the signific * [1530](https://github.com/grafana/loki/pull/1530) **WeiBanjo**: Allow extra command line args for external labels like hostname #### Jsonnet + * [1518](https://github.com/grafana/loki/pull/1518) **benjaminhuo**: Fix error 'Field does not exist: jaeger_mixin' in tk show * [1501](https://github.com/grafana/loki/pull/1501) **anarcher**: jsonnet: fix common/defaultPorts parameters * [1497](https://github.com/grafana/loki/pull/1497) **cyriltovena**: Update Loki mixin to include frontend QPS and latency.
@@ -3227,6 +3316,7 @@ Once again we can't thank our community and contributors enough for the signific * [1543](https://github.com/grafana/loki/pull/1543) **sh0rez**: fix(ksonnet): use apps/v1 #### Docs + * [1531](https://github.com/grafana/loki/pull/1531) **fitzoh**: Documentation: Add note on using Loki with Amazon ECS * [1521](https://github.com/grafana/loki/pull/1521) **rfratto**: docs: Document timestamp ordering rules * [1516](https://github.com/grafana/loki/pull/1516) **rfratto**: Link to release docs in README.md, not master docs @@ -3247,12 +3337,14 @@ Once again we can't thank our community and contributors enough for the signific * [1539](https://github.com/grafana/loki/pull/1539) **j18e**: docs: fix syntax error in pipeline example #### Build + * [1494](https://github.com/grafana/loki/pull/1494) **pracucci**: Fixed TOUCH_PROTOS in all DroneCI pipelines * [1479](https://github.com/grafana/loki/pull/1479) **owen-d**: TOUCH_PROTOS build arg for dockerfile * [1476](https://github.com/grafana/loki/pull/1476) **owen-d**: initiates docker daemon for circle windows builds * [1469](https://github.com/grafana/loki/pull/1469) **rfratto**: Makefile: re-enable journal scraping on ARM -#### New Members! +#### New Members + * [1415](https://github.com/grafana/loki/pull/1415) **cyriltovena**: Add Joe as member of the team. # 1.2.0 (2019-12-09) @@ -3302,6 +3394,7 @@ Some might call this a **breaking change**, we are instead calling it a bug fix **But please be aware if you are using the `/loki/api/v1/label` or `/loki/api/v1/label//values` the JSON result will be different in 1.1.0** Old result: + ```json { "values": [ @@ -3311,6 +3404,7 @@ Old result: ] } ``` + New result: ```json @@ -3355,7 +3449,6 @@ Binaries will now be zipped instead of gzipped as many people voiced their opini [1357](https://github.com/grafana/loki/pull/1357) **cyriltovena**: Supports same duration format in LogQL as Prometheus - ## Everything Else :heart: All PR's are important to us, thanks everyone for continuing to help support and improve Loki! :heart: @@ -3377,7 +3470,7 @@ Binaries will now be zipped instead of gzipped as many people voiced their opini * [1311](https://github.com/grafana/loki/pull/1311) **pstibrany**: Include positions filename in the error when YAML unmarshal fails. 
* [1310](https://github.com/grafana/loki/pull/1310) **JensErat**: fluent-bit: sorted JSON and properly convert []byte to string * [1304](https://github.com/grafana/loki/pull/1304) **pstibrany**: promtail: write positions to new file first, move to target location afterwards -* [1303](https://github.com/grafana/loki/pull/1303) **zhangjianweibj**: https://github.com/grafana/loki/issues/1302 +* [1303](https://github.com/grafana/loki/pull/1303) **zhangjianweibj**: * [1298](https://github.com/grafana/loki/pull/1298) **rfratto**: pkg/promtail: remove journal target forced path * [1279](https://github.com/grafana/loki/pull/1279) **rfratto**: Fix loki_discarded_samples_total metric * [1278](https://github.com/grafana/loki/pull/1278) **rfratto**: docs: update limits_config to new structure from #948 @@ -3391,8 +3484,6 @@ Binaries will now be zipped instead of gzipped as many people voiced their opini * [1223](https://github.com/grafana/loki/pull/1223) **jgehrcke**: authentication.md: replace "user" with "tenant" * [1204](https://github.com/grafana/loki/pull/1204) **allanhung**: fluent-bit-plugin: Auto add Kubernetes labels to Loki labels - - # 1.0.0 (2019-11-19) :tada: Nearly a year since Loki was announced at KubeCon in Seattle 2018 we are very excited to announce the 1.0.0 release of Loki! :tada: @@ -3470,7 +3561,6 @@ A **huge** thanks to the **36 contributors** who submitted **148 PR's** since 0. * PR [1062](https://github.com/grafana/loki/pull/1062) and [1089](https://github.com/grafana/loki/pull/1089) have moved Loki from Dep to Go Modules and to Go 1.13 - ## Loki ### Features/Improvements/Changes @@ -3507,6 +3597,7 @@ A **huge** thanks to the **36 contributors** who submitted **148 PR's** since 0. * **Loki** [654](https://github.com/grafana/loki/pull/654) **cyriltovena**: LogQL: Vector and Range Vector Aggregation. ### Bug Fixes + * **Loki** [1114](https://github.com/grafana/loki/pull/1114) **rfratto**: pkg/ingester: prevent shutdowns from processing during joining handoff * **Loki** [1097](https://github.com/grafana/loki/pull/1097) **joe-elliott**: Reverted cloud.google.com/go to 0.44.1 * **Loki** [986](https://github.com/grafana/loki/pull/986) **pracucci**: Fix panic in tailer due to race condition between send() and close() @@ -3550,7 +3641,7 @@ A **huge** thanks to the **36 contributors** who submitted **148 PR's** since 0. * **Docs** [1094](https://github.com/grafana/loki/pull/1094) **rfratto**: docs: update stages README with the docker and cri stages * **Docs** [1091](https://github.com/grafana/loki/pull/1091) **daixiang0**: docs(stage): add docker and cri * **Docs** [1077](https://github.com/grafana/loki/pull/1077) **daixiang0**: doc(fluent-bit): add missing namespace -* **Docs** [1073](https://github.com/grafana/loki/pull/1073) **flouthoc**: Re Fix Docs: PR https://github.com/grafana/loki/pull/1053 got erased due to force push. +* **Docs** [1073](https://github.com/grafana/loki/pull/1073) **flouthoc**: Re Fix Docs: PR got erased due to force push. 
* **Docs** [1069](https://github.com/grafana/loki/pull/1069) **daixiang0**: doc: unify GOPATH * **Docs** [1068](https://github.com/grafana/loki/pull/1068) **daixiang0**: doc: skip jb init when using Tanka * **Docs** [1067](https://github.com/grafana/loki/pull/1067) **rfratto**: Fix broken links to docs in README.md @@ -3660,21 +3751,10 @@ Loki is now using a Bot to help keep issues and PR's pruned based on age/relevan * **Github** [965](https://github.com/grafana/loki/pull/965) **rfratto**: Change label used to keep issues from being marked as stale to keepalive * **Github** [964](https://github.com/grafana/loki/pull/964) **rfratto**: Add probot-stale configuration to close stale issues. - - - - - - - - - - # 0.3.0 (2019-08-16) ### Features/Enhancements - * **Loki** [877](https://github.com/grafana/loki/pull/877) **pracucci**: loki: Improve Tailer loop * **Loki** [870](https://github.com/grafana/loki/pull/870) **sandlis**: bigtable-backup: update docker image for bigtable-backup tool * **Loki** [862](https://github.com/grafana/loki/pull/862) **sandlis**: live-tailing: preload all the historic entries before query context is cancelled @@ -3707,7 +3787,6 @@ Loki is now using a Bot to help keep issues and PR's pruned based on age/relevan > 857 POSSIBLY BREAKING: If you relied on a custom pod label to overwrite one of the labels configured by the other sections of the scrape config: `job`, `namespace`, `instance`, `container_name` and/or `__path__`, this will no longer happen, the custom pod labels are now loaded first and will be overwritten by any of these listed labels. - ### Fixes * **Loki** [897](https://github.com/grafana/loki/pull/897) **pracucci**: Fix panic in tailer when an ingester is removed from the ring while tailing * **Logcli** [863](https://github.com/grafana/loki/pull/863) **adityacs**: Fix Nolabels parse metrics - # 0.2.0 (2019-08-02) There were over 100 PR's merged since 0.1.0 was released, here's a highlight: @@ -3767,7 +3845,6 @@ There were many fixes, here are a few of the most important: * **Fluent-Plugin**: [667](https://github.com/grafana/loki/pull/667) Rename fluent plugin. * **Docker-Plugin**: [813](https://github.com/grafana/loki/pull/813) Fix panic for newer docker version (18.09.7+). - # 0.1.0 (2019-06-03) First (beta) Release! diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index ea0ce206fd720..0f9661192f873 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -156,6 +156,185 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set # itself to a key value store. [ingester: <ingester>] +pattern_ingester: + # Whether the pattern ingester is enabled. + # CLI flag: -pattern-ingester.enabled + [enabled: <boolean> | default = false] + + # Configures how the lifecycle of the pattern ingester will operate and where + # it will register for discovery. + lifecycler: + ring: + kvstore: + # Backend storage to use for the ring. Supported values are: consul, + # etcd, inmemory, memberlist, multi. + # CLI flag: -pattern-ingester.store + [store: <string> | default = "consul"] + + # The prefix for the keys in the store. Should end with a /. + # CLI flag: -pattern-ingester.prefix + [prefix: <string> | default = "collectors/"] + + # Configuration for a Consul client. Only applies if the selected + # kvstore is consul.
+ # The CLI flags prefix for this block configuration is: pattern-ingester + [consul: <consul>] + + # Configuration for an ETCD v3 client. Only applies if the selected + # kvstore is etcd. + # The CLI flags prefix for this block configuration is: pattern-ingester + [etcd: <etcd>] + + multi: + # Primary backend storage used by multi-client. + # CLI flag: -pattern-ingester.multi.primary + [primary: <string> | default = ""] + + # Secondary backend storage used by multi-client. + # CLI flag: -pattern-ingester.multi.secondary + [secondary: <string> | default = ""] + + # Mirror writes to secondary store. + # CLI flag: -pattern-ingester.multi.mirror-enabled + [mirror_enabled: <boolean> | default = false] + + # Timeout for storing value to secondary store. + # CLI flag: -pattern-ingester.multi.mirror-timeout + [mirror_timeout: <duration> | default = 2s] + + # The heartbeat timeout after which ingesters are skipped for + # reads/writes. 0 = never (timeout disabled). + # CLI flag: -pattern-ingester.ring.heartbeat-timeout + [heartbeat_timeout: <duration> | default = 1m] + + # The number of ingesters to write to and read from. + # CLI flag: -pattern-ingester.distributor.replication-factor + [replication_factor: <int> | default = 1] + + # True to enable the zone-awareness and replicate ingested samples across + # different availability zones. + # CLI flag: -pattern-ingester.distributor.zone-awareness-enabled + [zone_awareness_enabled: <boolean> | default = false] + + # Comma-separated list of zones to exclude from the ring. Instances in + # excluded zones will be filtered out from the ring. + # CLI flag: -pattern-ingester.distributor.excluded-zones + [excluded_zones: <string> | default = ""] + + # Number of tokens for each ingester. + # CLI flag: -pattern-ingester.num-tokens + [num_tokens: <int> | default = 128] + + # Period at which to heartbeat to consul. 0 = disabled. + # CLI flag: -pattern-ingester.heartbeat-period + [heartbeat_period: <duration> | default = 5s] + + # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = + # disabled. + # CLI flag: -pattern-ingester.heartbeat-timeout + [heartbeat_timeout: <duration> | default = 1m] + + # Observe tokens after generating to resolve collisions. Useful when using + # gossiping ring. + # CLI flag: -pattern-ingester.observe-period + [observe_period: <duration> | default = 0s] + + # Period to wait for a claim from another member; will join automatically + # after this. + # CLI flag: -pattern-ingester.join-after + [join_after: <duration> | default = 0s] + + # Minimum duration to wait after the internal readiness checks have passed + # but before succeeding the readiness endpoint. This is used to slow down + # deployment controllers (eg. Kubernetes) after an instance is ready and + # before they proceed with a rolling update, to give the rest of the cluster + # instances enough time to receive ring updates. + # CLI flag: -pattern-ingester.min-ready-duration + [min_ready_duration: <duration> | default = 15s] + + # Name of network interface to read address from. + # CLI flag: -pattern-ingester.lifecycler.interface + [interface_names: <list of strings> | default = []] + + # Enable IPv6 support. Required to make use of IP addresses from IPv6 + # interfaces. + # CLI flag: -pattern-ingester.enable-inet6 + [enable_inet6: <boolean> | default = false] + + # Duration to sleep for before exiting, to ensure metrics are scraped. + # CLI flag: -pattern-ingester.final-sleep + [final_sleep: <duration> | default = 0s] + + # File path where tokens are stored. If empty, tokens are not stored at + # shutdown and restored at startup.
+ # CLI flag: -pattern-ingester.tokens-file-path + [tokens_file_path: <string> | default = ""] + + # The availability zone where this instance is running. + # CLI flag: -pattern-ingester.availability-zone + [availability_zone: <string> | default = ""] + + # Unregister from the ring upon clean shutdown. It can be useful to disable + # for rolling restarts with consistent naming in conjunction with + # -distributor.extend-writes=false. + # CLI flag: -pattern-ingester.unregister-on-shutdown + [unregister_on_shutdown: <boolean> | default = true] + + # When enabled the readiness probe succeeds only after all instances are + # ACTIVE and healthy in the ring, otherwise only the instance itself is + # checked. This option should be disabled if in your cluster multiple + # instances can be rolled out simultaneously, otherwise rolling updates may + # be slowed down. + # CLI flag: -pattern-ingester.readiness-check-ring-health + [readiness_check_ring_health: <boolean> | default = true] + + # IP address to advertise in the ring. + # CLI flag: -pattern-ingester.lifecycler.addr + [address: <string> | default = ""] + + # port to advertise in consul (defaults to server.grpc-listen-port). + # CLI flag: -pattern-ingester.lifecycler.port + [port: <int> | default = 0] + + # ID to register in the ring. + # CLI flag: -pattern-ingester.lifecycler.ID + [id: <string> | default = ""] + + # Configures how the pattern ingester will connect to the ingesters. + client_config: + # Configures how connections are pooled. + pool_config: + # How frequently to clean up clients for ingesters that have gone away. + # CLI flag: -pattern-ingester.client-cleanup-period + [client_cleanup_period: <duration> | default = 15s] + + # Run a health check on each ingester client during periodic cleanup. + # CLI flag: -pattern-ingester.health-check-ingesters + [health_check_ingesters: <boolean> | default = true] + + # Timeout for the health check. + # CLI flag: -pattern-ingester.remote-timeout + [remote_timeout: <duration> | default = 1s] + + # The remote request timeout on the client side. + # CLI flag: -pattern-ingester.client.timeout + [remote_timeout: <duration> | default = 5s] + + # Configures how the gRPC connection to ingesters work as a client. + # The CLI flags prefix for this block configuration is: + # pattern-ingester.client + [grpc_client_config: <grpc_client>] + + # How many flushes can happen concurrently from each stream. + # CLI flag: -pattern-ingester.concurrent-flushes + [concurrent_flushes: <int> | default = 32] + + # How often should the ingester see if there are any blocks to flush. The + # first flush check is delayed by a random time up to 0.8x the flush check + # period. Additionally, there is +/- 1% jitter added to the interval. + # CLI flag: -pattern-ingester.flush-check-period + [flush_check_period: <duration> | default = 30s] + # The index_gateway block configures the Loki index gateway server, responsible # for serving index queries without the need to constantly interact with the # object store. @@ -3963,6 +4142,7 @@ Configuration for a Consul client. Only applies if the selected kvstore is `cons - `compactor.ring` - `distributor.ring` - `index-gateway.ring` +- `pattern-ingester` - `query-scheduler.ring` - `ruler.ring` @@ -4009,6 +4189,7 @@ Configuration for an ETCD v3 client.
Only applies if the selected kvstore is `et - `compactor.ring` - `distributor.ring` - `index-gateway.ring` +- `pattern-ingester` - `query-scheduler.ring` - `ruler.ring` @@ -4310,6 +4491,7 @@ The `grpc_client` block configures the gRPC client used to communicate between t - `boltdb.shipper.index-gateway-client.grpc` - `frontend.grpc-client-config` - `ingester.client` +- `pattern-ingester.client` - `querier.frontend-client` - `query-scheduler.grpc-client-config` - `ruler.client` diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md index a6fe7cfe14217..5682510e4d861 100644 --- a/docs/sources/reference/api.md +++ b/docs/sources/reference/api.md @@ -43,6 +43,7 @@ These HTTP endpoints are exposed by the `querier`, `query-frontend`, `read`, and - [`GET /loki/api/v1/index/stats`](#query-log-statistics) - [`GET /loki/api/v1/index/volume`](#query-log-volume) - [`GET /loki/api/v1/index/volume_range`](#query-log-volume) +- [`GET /loki/api/v1/patterns`](#patterns-detection) - [`GET /loki/api/v1/tail`](#stream-logs) ### Status endpoints @@ -849,6 +850,105 @@ URL query parameters: You can URL-encode these parameters directly in the request body by using the POST method and `Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large or dynamic number of stream selectors that may breach server-side URL character limits. +## Patterns detection + +```bash +GET /loki/api/v1/patterns +``` + +{{< admonition type="note" >}} +You must configure + +```yaml +pattern_ingester: + enabled: true +``` + +to enable this feature. +{{< /admonition >}} + +The `/loki/api/v1/patterns` endpoint can be used to query Loki for patterns detected in the logs. This helps you understand the structure of the logs Loki has ingested. + +The `query` should be a valid LogQL stream selector, for example `{job="foo", env=~".+"}`. The result is aggregated by the `pattern` from all matching streams. + +For each pattern detected, the response includes the pattern itself and the number of samples for each pattern at each timestamp. + +For example, if you have the following logs: + +```log +ts=2024-03-30T23:03:40 caller=grpc_logging.go:66 level=info method=/cortex.Ingester/Push duration=200ms msg=gRPC +ts=2024-03-30T23:03:41 caller=grpc_logging.go:66 level=info method=/cortex.Ingester/Push duration=500ms msg=gRPC +``` + +The pattern detected would be: + +```log +ts=<_> caller=grpc_logging.go:66 level=info method=/cortex.Ingester/Push duration=<_> msg=gRPC +``` + +URL query parameters: + +- `query`: The [LogQL]({{< relref "../query" >}}) matchers to check (that is, `{job="foo", env=~".+"}`). This parameter is required. +- `start=<nanosecond Unix epoch>`: Start timestamp. This parameter is required. +- `end=<nanosecond Unix epoch>`: End timestamp. This parameter is required.
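To make the request and response shape concrete before the cURL example below, here is a minimal Go client sketch. It is an editor's illustration, not code from this PR: the Loki address `localhost:3100`, the one-hour window, and the `patternsResponse` struct name are assumptions, and the struct simply mirrors the JSON format documented in this section.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// patternsResponse mirrors the documented JSON shape:
// {"status":"success","data":[{"pattern":"...","samples":[[<unix seconds>,<count>], ...]}]}
type patternsResponse struct {
	Status string `json:"status"`
	Data   []struct {
		Pattern string     `json:"pattern"`
		Samples [][2]int64 `json:"samples"` // each sample is a [timestamp, count] tuple
	} `json:"data"`
}

func main() {
	// Query the last hour; start/end are nanosecond Unix epoch timestamps.
	end := time.Now()
	start := end.Add(-time.Hour)

	params := url.Values{}
	params.Set("query", `{app="loki"}`)
	params.Set("start", fmt.Sprintf("%d", start.UnixNano()))
	params.Set("end", fmt.Sprintf("%d", end.UnixNano()))

	resp, err := http.Get("http://localhost:3100/loki/api/v1/patterns?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var pr patternsResponse
	if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil {
		panic(err)
	}

	// Sum the per-timestamp counts so the most common patterns stand out.
	for _, series := range pr.Data {
		var total int64
		for _, s := range series.Samples {
			total += s[1]
		}
		fmt.Printf("%8d  %s\n", total, series.Pattern)
	}
}
```

Each element of `samples` is decoded as a two-element array because the documented response encodes every sample as a `[timestamp, count]` tuple.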
+ +### Examples + +This example cURL command + +```bash +curl -s "http://localhost:3100/loki/api/v1/patterns" \ + --data-urlencode 'query={app="loki"}' | jq +``` + +gave this response: + +```json +{ + "status": "success", + "data": [ + { + "pattern": "<_> caller=grpc_logging.go:66 <_> level=error method=/cortex.Ingester/Push <_> msg=gRPC err=\"connection refused to object store\"", + "samples": [ + [ + 1711839260, + 1 + ], + [ + 1711839270, + 2 + ], + [ + 1711839280, + 1 + ] + ] + }, + { + "pattern": "<_> caller=grpc_logging.go:66 <_> level=info method=/cortex.Ingester/Push <_> msg=gRPC", + "samples": [ + [ + 1711839260, + 105 + ], + [ + 1711839270, + 222 + ], + [ + 1711839280, + 196 + ] + ] + } + ] +} +``` + +The result is a list of patterns detected in the logs, with the number of samples for each pattern at each timestamp. +The pattern format is the same as the [LogQL]({{< relref "../query" >}}) pattern filter and parser and can be used in queries for filtering matching logs. +Each sample is a tuple of timestamp (second) and count. + ## Stream logs ```bash diff --git a/go.mod b/go.mod index 0d0659a92220b..74331ec546e93 100644 --- a/go.mod +++ b/go.mod @@ -126,6 +126,7 @@ require ( github.com/gogo/googleapis v1.4.0 github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/heroku/x v0.0.61 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b github.com/prometheus/alertmanager v0.27.0 diff --git a/go.sum b/go.sum index 1a12de960bec5..0a9c33bbbcc0a 100644 --- a/go.sum +++ b/go.sum @@ -1129,6 +1129,8 @@ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcp-scada-provider v0.2.0/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0= github.com/hashicorp/hcp-sdk-go v0.23.0/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw= diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index 99bf587b21e85..18350da22eb08 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -94,7 +94,7 @@ type instance struct { streams *streamsMap index *index.Multi - mapper *fpMapper // using of mapper no longer needs mutex because reading from streams is lock-free + mapper *FpMapper // using of mapper no longer needs mutex because reading from streams is lock-free instanceID string @@ -175,7 +175,7 @@ func newInstance( writeFailures: writeFailures, schemaconfig: &c, } - i.mapper = newFPMapper(i.getLabelsFromFingerprint) + i.mapper = NewFPMapper(i.getLabelsFromFingerprint) return i, err } @@ -383,7 +383,6 @@ func (i *instance) chunkFormatAt(at model.Time) (byte, chunkenc.HeadBlockFmt, er } return chunkFormat, headblock, nil - } // getOrCreateStream returns the stream or creates it. 
@@ -411,7 +410,7 @@ func (i *instance) removeStream(s *stream) { func (i *instance) getHashForLabels(ls labels.Labels) model.Fingerprint { var fp uint64 fp, i.buf = ls.HashWithoutLabels(i.buf, []string(nil)...) - return i.mapper.mapFP(model.Fingerprint(fp), ls) + return i.mapper.MapFP(model.Fingerprint(fp), ls) } // Return labels associated with given fingerprint. Used by fingerprint mapper. diff --git a/pkg/ingester/mapper.go b/pkg/ingester/mapper.go index ced7c0d6833e6..6bd75bf3e8b6f 100644 --- a/pkg/ingester/mapper.go +++ b/pkg/ingester/mapper.go @@ -18,9 +18,9 @@ const maxMappedFP = 1 << 20 // About 1M fingerprints reserved for mapping. var separatorString = string([]byte{model.SeparatorByte}) -// fpMapper is used to map fingerprints in order to work around fingerprint +// FpMapper is used to map fingerprints in order to work around fingerprint // collisions. -type fpMapper struct { +type FpMapper struct { // highestMappedFP has to be aligned for atomic operations. highestMappedFP atomic.Uint64 @@ -34,22 +34,22 @@ type fpMapper struct { fpToLabels func(fingerprint model.Fingerprint) labels.Labels } -// newFPMapper returns an fpMapper ready to use. -func newFPMapper(fpToLabels func(fingerprint model.Fingerprint) labels.Labels) *fpMapper { +// NewFPMapper returns an FpMapper ready to use. +func NewFPMapper(fpToLabels func(fingerprint model.Fingerprint) labels.Labels) *FpMapper { if fpToLabels == nil { panic("nil fpToLabels") } - return &fpMapper{ + return &FpMapper{ fpToLabels: fpToLabels, mappings: map[model.Fingerprint]map[string]model.Fingerprint{}, } } -// mapFP takes a raw fingerprint (as returned by Metrics.FastFingerprint) and +// MapFP takes a raw fingerprint (as returned by Metrics.FastFingerprint) and // returns a truly unique fingerprint. The caller must have locked the raw // fingerprint. -func (m *fpMapper) mapFP(fp model.Fingerprint, metric labels.Labels) model.Fingerprint { +func (m *FpMapper) MapFP(fp model.Fingerprint, metric labels.Labels) model.Fingerprint { // First check if we are in the reserved FP space, in which case this is // automatically a collision that has to be mapped. if fp <= maxMappedFP { @@ -90,7 +90,7 @@ func (m *fpMapper) mapFP(fp model.Fingerprint, metric labels.Labels) model.Finge // maybeAddMapping is only used internally. It takes a detected collision and // adds it to the collisions map if not yet there. In any case, it returns the // truly unique fingerprint for the colliding metric. -func (m *fpMapper) maybeAddMapping(fp model.Fingerprint, collidingMetric labels.Labels) model.Fingerprint { +func (m *FpMapper) maybeAddMapping(fp model.Fingerprint, collidingMetric labels.Labels) model.Fingerprint { ms := metricToUniqueString(collidingMetric) m.mtx.RLock() mappedFPs, ok := m.mappings[fp] @@ -127,7 +127,7 @@ func (m *fpMapper) maybeAddMapping(fp model.Fingerprint, collidingMetric labels.
return mappedFP } -func (m *fpMapper) nextMappedFP() model.Fingerprint { +func (m *FpMapper) nextMappedFP() model.Fingerprint { mappedFP := model.Fingerprint(m.highestMappedFP.Inc()) if mappedFP > maxMappedFP { panic(fmt.Errorf("more than %v fingerprints mapped in collision detection", maxMappedFP)) diff --git a/pkg/ingester/mapper_test.go b/pkg/ingester/mapper_test.go index 3104135740a32..e747dc247c074 100644 --- a/pkg/ingester/mapper_test.go +++ b/pkg/ingester/mapper_test.go @@ -58,73 +58,73 @@ func copyValuesAndSort(a []labels.Label) labels.Labels { func TestFPMapper(t *testing.T) { sm := map[model.Fingerprint]labels.Labels{} - mapper := newFPMapper(func(fp model.Fingerprint) labels.Labels { + mapper := NewFPMapper(func(fp model.Fingerprint) labels.Labels { return sm[fp] }) // Everything is empty, resolving a FP should do nothing. - assertFingerprintEqual(t, mapper.mapFP(fp1, cm11), fp1) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm12), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm11), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm12), fp1) // cm11 is in sm. Adding cm11 should do nothing. Mapping cm12 should resolve // the collision. sm[fp1] = copyValuesAndSort(cm11) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm11), fp1) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm12), model.Fingerprint(1)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm11), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm12), model.Fingerprint(1)) // The mapped cm12 is added to sm, too. That should not change the outcome. sm[model.Fingerprint(1)] = copyValuesAndSort(cm12) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm11), fp1) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm12), model.Fingerprint(1)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm11), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm12), model.Fingerprint(1)) // Now map cm13, should reproducibly result in the next mapped FP. - assertFingerprintEqual(t, mapper.mapFP(fp1, cm13), model.Fingerprint(2)) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm13), model.Fingerprint(2)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm13), model.Fingerprint(2)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm13), model.Fingerprint(2)) // Add cm13 to sm. Should not change anything. sm[model.Fingerprint(2)] = copyValuesAndSort(cm13) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm11), fp1) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm12), model.Fingerprint(1)) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm13), model.Fingerprint(2)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm11), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm12), model.Fingerprint(1)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm13), model.Fingerprint(2)) // Now add cm21 and cm22 in the same way, checking the mapped FPs. 
- assertFingerprintEqual(t, mapper.mapFP(fp2, cm21), fp2) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm21), fp2) sm[fp2] = copyValuesAndSort(cm21) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm21), fp2) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm22), model.Fingerprint(3)) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm21), fp2) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm22), model.Fingerprint(3)) sm[model.Fingerprint(3)] = copyValuesAndSort(cm22) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm21), fp2) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm22), model.Fingerprint(3)) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm21), fp2) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm22), model.Fingerprint(3)) // Map cm31, resulting in a mapping straight away. - assertFingerprintEqual(t, mapper.mapFP(fp3, cm31), model.Fingerprint(4)) + assertFingerprintEqual(t, mapper.MapFP(fp3, cm31), model.Fingerprint(4)) sm[model.Fingerprint(4)] = copyValuesAndSort(cm31) // Map cm32, which is now mapped for two reasons... - assertFingerprintEqual(t, mapper.mapFP(fp3, cm32), model.Fingerprint(5)) + assertFingerprintEqual(t, mapper.MapFP(fp3, cm32), model.Fingerprint(5)) sm[model.Fingerprint(5)] = copyValuesAndSort(cm32) // Now check ALL the mappings, just to be sure. - assertFingerprintEqual(t, mapper.mapFP(fp1, cm11), fp1) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm12), model.Fingerprint(1)) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm13), model.Fingerprint(2)) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm21), fp2) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm22), model.Fingerprint(3)) - assertFingerprintEqual(t, mapper.mapFP(fp3, cm31), model.Fingerprint(4)) - assertFingerprintEqual(t, mapper.mapFP(fp3, cm32), model.Fingerprint(5)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm11), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm12), model.Fingerprint(1)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm13), model.Fingerprint(2)) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm21), fp2) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm22), model.Fingerprint(3)) + assertFingerprintEqual(t, mapper.MapFP(fp3, cm31), model.Fingerprint(4)) + assertFingerprintEqual(t, mapper.MapFP(fp3, cm32), model.Fingerprint(5)) // Remove all the fingerprints from sm, which should change nothing, as // the existing mappings stay and should be detected. delete(sm, fp1) delete(sm, fp2) delete(sm, fp3) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm11), fp1) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm12), model.Fingerprint(1)) - assertFingerprintEqual(t, mapper.mapFP(fp1, cm13), model.Fingerprint(2)) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm21), fp2) - assertFingerprintEqual(t, mapper.mapFP(fp2, cm22), model.Fingerprint(3)) - assertFingerprintEqual(t, mapper.mapFP(fp3, cm31), model.Fingerprint(4)) - assertFingerprintEqual(t, mapper.mapFP(fp3, cm32), model.Fingerprint(5)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm11), fp1) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm12), model.Fingerprint(1)) + assertFingerprintEqual(t, mapper.MapFP(fp1, cm13), model.Fingerprint(2)) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm21), fp2) + assertFingerprintEqual(t, mapper.MapFP(fp2, cm22), model.Fingerprint(3)) + assertFingerprintEqual(t, mapper.MapFP(fp3, cm31), model.Fingerprint(4)) + assertFingerprintEqual(t, mapper.MapFP(fp3, cm32), model.Fingerprint(5)) } // assertFingerprintEqual asserts that two fingerprints are equal. 
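The rename from `fpMapper`/`mapFP` to the exported `FpMapper`/`MapFP` makes the collision-handling mapper usable outside the `ingester` package. As a rough usage sketch (an editor's illustration, not code from this PR; it assumes the `github.com/grafana/loki/v3` module path used elsewhere in this diff, and the `byFP` map is a stand-in for whatever fingerprint-to-labels lookup the caller maintains):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/v3/pkg/ingester"
)

func main() {
	// The mapper needs a way to look up the labels currently stored for a
	// fingerprint; here that is a plain map the caller keeps up to date.
	byFP := map[model.Fingerprint]labels.Labels{}

	mapper := ingester.NewFPMapper(func(fp model.Fingerprint) labels.Labels {
		return byFP[fp]
	})

	lsA := labels.FromStrings("app", "loki", "env", "prod")
	lsB := labels.FromStrings("app", "loki", "env", "dev")

	// Pretend both label sets hashed to the same raw fingerprint (chosen
	// above the reserved mapping space, so the first caller keeps it).
	raw := model.Fingerprint(1 << 21)

	fpA := mapper.MapFP(raw, lsA) // no collision yet: fpA == raw
	byFP[fpA] = lsA

	fpB := mapper.MapFP(raw, lsB) // collision: a fresh fingerprint is assigned
	byFP[fpB] = lsB

	fmt.Println(fpA != fpB) // true
}
```

This mirrors the behavior exercised by `TestFPMapper` above: the first label set keeps its raw fingerprint, and any different label set arriving with the same raw fingerprint is assigned a new one from the reserved space below `maxMappedFP`.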
diff --git a/pkg/loghttp/patterns.go b/pkg/loghttp/patterns.go new file mode 100644 index 0000000000000..0517f9c059963 --- /dev/null +++ b/pkg/loghttp/patterns.go @@ -0,0 +1,21 @@ +package loghttp + +import ( + "net/http" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func ParsePatternsQuery(r *http.Request) (*logproto.QueryPatternsRequest, error) { + req := &logproto.QueryPatternsRequest{} + + start, end, err := bounds(r) + if err != nil { + return nil, err + } + req.Start = start + req.End = end + + req.Query = query(r) + return req, nil +} diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index 82979824a5f57..212abc38633ec 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -445,3 +445,33 @@ func (m *ShardsRequest) LogToSpan(sp opentracing.Span) { } sp.LogFields(fields...) } + +func (m *QueryPatternsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } + +func (m *QueryPatternsRequest) GetStep() int64 { return 0 } + +func (m *QueryPatternsRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.Start = start + clone.End = end + return &clone +} + +func (m *QueryPatternsRequest) WithQuery(query string) definitions.Request { + clone := *m + clone.Query = query + return &clone +} + +func (m *QueryPatternsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) +} + +func (m *QueryPatternsRequest) LogToSpan(sp opentracing.Span) { + fields := []otlog.Field{ + otlog.String("start", m.Start.String()), + otlog.String("end", m.End.String()), + otlog.String("query", m.GetQuery()), + } + sp.LogFields(fields...) +} diff --git a/pkg/logproto/extensions.go b/pkg/logproto/extensions.go index 19e1f7be3b0c5..7cd763cb5bc4e 100644 --- a/pkg/logproto/extensions.go +++ b/pkg/logproto/extensions.go @@ -7,6 +7,7 @@ import ( "github.com/cespare/xxhash/v2" "github.com/dustin/go-humanize" + jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -152,6 +153,32 @@ const ( DetectedFieldBytes DetectedFieldType = "bytes" ) +// UnmarshalJSON implements the json.Unmarshaler interface. +// QueryPatternsResponse json representation is different from the proto +// +// `{"status":"success","data":[{"pattern":"foo <*> bar","samples":[[1,1],[2,2]]},{"pattern":"foo <*> buzz","samples":[[3,1],[3,2]]}]}` +func (r *QueryPatternsResponse) UnmarshalJSON(data []byte) error { + var v struct { + Status string `json:"status"` + Data []struct { + Pattern string `json:"pattern"` + Samples [][]int64 `json:"samples"` + } `json:"data"` + } + if err := jsoniter.ConfigFastest.Unmarshal(data, &v); err != nil { + return err + } + r.Series = make([]*PatternSeries, 0, len(v.Data)) + for _, d := range v.Data { + samples := make([]*PatternSample, 0, len(d.Samples)) + for _, s := range d.Samples { + samples = append(samples, &PatternSample{Timestamp: model.TimeFromUnix(s[0]), Value: s[1]}) + } + r.Series = append(r.Series, &PatternSeries{Pattern: d.Pattern, Samples: samples}) + } + return nil +} + func (d DetectedFieldType) String() string { return string(d) } diff --git a/pkg/logproto/pattern.pb.go b/pkg/logproto/pattern.pb.go new file mode 100644 index 0000000000000..b1b5755e9dfa0 --- /dev/null +++ b/pkg/logproto/pattern.pb.go @@ -0,0 +1,1459 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: pkg/logproto/pattern.proto + +package logproto + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + push "github.com/grafana/loki/pkg/push" + github_com_prometheus_common_model "github.com/prometheus/common/model" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryPatternsRequest struct { + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` +} + +func (m *QueryPatternsRequest) Reset() { *m = QueryPatternsRequest{} } +func (*QueryPatternsRequest) ProtoMessage() {} +func (*QueryPatternsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aaf4192acc66a4ea, []int{0} +} +func (m *QueryPatternsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPatternsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPatternsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPatternsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPatternsRequest.Merge(m, src) +} +func (m *QueryPatternsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPatternsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPatternsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPatternsRequest proto.InternalMessageInfo + +func (m *QueryPatternsRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *QueryPatternsRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *QueryPatternsRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +type QueryPatternsResponse struct { + Series []*PatternSeries `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` +} + +func (m *QueryPatternsResponse) Reset() { *m = QueryPatternsResponse{} } +func (*QueryPatternsResponse) ProtoMessage() {} +func (*QueryPatternsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aaf4192acc66a4ea, []int{1} +} +func (m *QueryPatternsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPatternsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPatternsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*QueryPatternsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPatternsResponse.Merge(m, src) +} +func (m *QueryPatternsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPatternsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPatternsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPatternsResponse proto.InternalMessageInfo + +func (m *QueryPatternsResponse) GetSeries() []*PatternSeries { + if m != nil { + return m.Series + } + return nil +} + +type PatternSeries struct { + Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` + Samples []*PatternSample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples,omitempty"` +} + +func (m *PatternSeries) Reset() { *m = PatternSeries{} } +func (*PatternSeries) ProtoMessage() {} +func (*PatternSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_aaf4192acc66a4ea, []int{2} +} +func (m *PatternSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatternSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PatternSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PatternSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatternSeries.Merge(m, src) +} +func (m *PatternSeries) XXX_Size() int { + return m.Size() +} +func (m *PatternSeries) XXX_DiscardUnknown() { + xxx_messageInfo_PatternSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_PatternSeries proto.InternalMessageInfo + +func (m *PatternSeries) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PatternSeries) GetSamples() []*PatternSample { + if m != nil { + return m.Samples + } + return nil +} + +type PatternSample struct { + Timestamp github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=timestamp,proto3,customtype=github.com/prometheus/common/model.Time" json:"timestamp"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *PatternSample) Reset() { *m = PatternSample{} } +func (*PatternSample) ProtoMessage() {} +func (*PatternSample) Descriptor() ([]byte, []int) { + return fileDescriptor_aaf4192acc66a4ea, []int{3} +} +func (m *PatternSample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatternSample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PatternSample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PatternSample) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatternSample.Merge(m, src) +} +func (m *PatternSample) XXX_Size() int { + return m.Size() +} +func (m *PatternSample) XXX_DiscardUnknown() { + xxx_messageInfo_PatternSample.DiscardUnknown(m) +} + +var xxx_messageInfo_PatternSample proto.InternalMessageInfo + +func (m *PatternSample) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*QueryPatternsRequest)(nil), "logproto.QueryPatternsRequest") + proto.RegisterType((*QueryPatternsResponse)(nil), "logproto.QueryPatternsResponse") + proto.RegisterType((*PatternSeries)(nil), "logproto.PatternSeries") + proto.RegisterType((*PatternSample)(nil), "logproto.PatternSample") +} + +func init() { 
proto.RegisterFile("pkg/logproto/pattern.proto", fileDescriptor_aaf4192acc66a4ea) } + +var fileDescriptor_aaf4192acc66a4ea = []byte{ + // 470 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x31, 0x6f, 0xd4, 0x30, + 0x14, 0x8e, 0x1b, 0xae, 0xd7, 0xba, 0x62, 0x31, 0x57, 0x88, 0x82, 0xe4, 0x9c, 0xb2, 0x70, 0x53, + 0x0c, 0x57, 0x09, 0x24, 0xc6, 0x9b, 0x18, 0x40, 0x2a, 0x81, 0x09, 0xc1, 0x90, 0x6b, 0x5d, 0xe7, + 0xd4, 0x38, 0x4e, 0x63, 0xbb, 0x12, 0x1b, 0x3f, 0xe1, 0x7e, 0x02, 0x23, 0x3f, 0xa5, 0xe3, 0x8d, + 0x15, 0x43, 0xe1, 0x72, 0x0b, 0x63, 0x7f, 0x02, 0x8a, 0xed, 0xf4, 0xae, 0x15, 0x1d, 0x58, 0x12, + 0xbf, 0xf7, 0x7d, 0xef, 0xf3, 0xf7, 0xde, 0x33, 0x0c, 0xab, 0x53, 0x46, 0x0a, 0xc1, 0xaa, 0x5a, + 0x28, 0x41, 0xaa, 0x4c, 0x29, 0x5a, 0x97, 0x89, 0x89, 0xd0, 0x4e, 0x97, 0x0f, 0x07, 0x4c, 0x30, + 0x61, 0x29, 0xed, 0xc9, 0xe2, 0x61, 0xc4, 0x84, 0x60, 0x05, 0x25, 0x26, 0x9a, 0xea, 0x13, 0xa2, + 0x66, 0x9c, 0x4a, 0x95, 0xf1, 0xca, 0x11, 0x9e, 0xde, 0x12, 0xef, 0x0e, 0x0e, 0x7c, 0xd4, 0x82, + 0x95, 0x96, 0xb9, 0xf9, 0xd8, 0x64, 0xfc, 0x1d, 0xc0, 0xc1, 0x7b, 0x4d, 0xeb, 0xaf, 0x87, 0xd6, + 0x89, 0x4c, 0xe9, 0x99, 0xa6, 0x52, 0xa1, 0x01, 0xec, 0x9d, 0xb5, 0xf9, 0x00, 0x0c, 0xc1, 0x68, + 0x37, 0xb5, 0x01, 0x7a, 0x0d, 0x7b, 0x52, 0x65, 0xb5, 0x0a, 0xb6, 0x86, 0x60, 0xb4, 0x37, 0x0e, + 0x13, 0xeb, 0x28, 0xe9, 0x1c, 0x25, 0x1f, 0x3b, 0x47, 0x93, 0x9d, 0x8b, 0xab, 0xc8, 0x9b, 0xff, + 0x8a, 0x40, 0x6a, 0x4b, 0xd0, 0x4b, 0xe8, 0xd3, 0xf2, 0x38, 0xf0, 0xff, 0xa3, 0xb2, 0x2d, 0x88, + 0xdf, 0xc0, 0xfd, 0x3b, 0x0e, 0x65, 0x25, 0x4a, 0x49, 0x11, 0x81, 0xdb, 0x92, 0xd6, 0x33, 0x2a, + 0x03, 0x30, 0xf4, 0x47, 0x7b, 0xe3, 0x27, 0xc9, 0x4d, 0xc7, 0x8e, 0xfb, 0xc1, 0xc0, 0xa9, 0xa3, + 0xc5, 0x9f, 0xe1, 0xc3, 0x5b, 0x00, 0x0a, 0x60, 0xdf, 0x6d, 0xc0, 0xb5, 0xd9, 0x85, 0xe8, 0x05, + 0xec, 0xcb, 0x8c, 0x57, 0x05, 0x95, 0xc1, 0xd6, 0x7d, 0xe2, 0x06, 0x4f, 0x3b, 0x5e, 0xac, 0xd6, + 0xea, 0x26, 0x83, 0xde, 0xc1, 0xdd, 0x9b, 0x05, 0x19, 0x7d, 0x7f, 0x42, 0xda, 0xd6, 0x7e, 0x5e, + 0x45, 0xcf, 0xd8, 0x4c, 0xe5, 0x7a, 0x9a, 0x1c, 0x09, 0xde, 0x6e, 0x93, 0x53, 0x95, 0x53, 0x2d, + 0xc9, 0x91, 0xe0, 0x5c, 0x94, 0x84, 0x8b, 0x63, 0x5a, 0x98, 0x81, 0xa4, 0x6b, 0x85, 0x76, 0x23, + 0xe7, 0x59, 0xa1, 0xa9, 0x99, 0xbd, 0x9f, 0xda, 0x60, 0x3c, 0x07, 0xb0, 0xef, 0xae, 0x45, 0xaf, + 0xe0, 0x83, 0x43, 0x2d, 0x73, 0xb4, 0xbf, 0xe1, 0x55, 0xcb, 0xdc, 0xad, 0x34, 0x7c, 0x7c, 0x37, + 0x6d, 0xe7, 0x18, 0x7b, 0xe8, 0x2d, 0xec, 0x99, 0x11, 0x23, 0xbc, 0xa6, 0xfc, 0xeb, 0x55, 0x84, + 0xd1, 0xbd, 0x78, 0xa7, 0xf5, 0x1c, 0x4c, 0xbe, 0x2c, 0x96, 0xd8, 0xbb, 0x5c, 0x62, 0xef, 0x7a, + 0x89, 0xc1, 0xb7, 0x06, 0x83, 0x1f, 0x0d, 0x06, 0x17, 0x0d, 0x06, 0x8b, 0x06, 0x83, 0xdf, 0x0d, + 0x06, 0x7f, 0x1a, 0xec, 0x5d, 0x37, 0x18, 0xcc, 0x57, 0xd8, 0x5b, 0xac, 0xb0, 0x77, 0xb9, 0xc2, + 0xde, 0xa7, 0xcd, 0x91, 0xb0, 0x3a, 0x3b, 0xc9, 0xca, 0x8c, 0x14, 0xe2, 0x74, 0x46, 0xce, 0x0f, + 0xc8, 0xe6, 0xb3, 0x9e, 0x6e, 0x9b, 0xdf, 0xc1, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0xd6, + 0xbc, 0xfc, 0x4a, 0x03, 0x00, 0x00, +} + +func (this *QueryPatternsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryPatternsRequest) + if !ok { + that2, ok := that.(QueryPatternsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Query != that1.Query { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + if 
!this.End.Equal(that1.End) { + return false + } + return true +} +func (this *QueryPatternsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryPatternsResponse) + if !ok { + that2, ok := that.(QueryPatternsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Series) != len(that1.Series) { + return false + } + for i := range this.Series { + if !this.Series[i].Equal(that1.Series[i]) { + return false + } + } + return true +} +func (this *PatternSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PatternSeries) + if !ok { + that2, ok := that.(PatternSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Pattern != that1.Pattern { + return false + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(that1.Samples[i]) { + return false + } + } + return true +} +func (this *PatternSample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PatternSample) + if !ok { + that2, ok := that.(PatternSample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Timestamp.Equal(that1.Timestamp) { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *QueryPatternsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.QueryPatternsRequest{") + s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryPatternsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.QueryPatternsResponse{") + if this.Series != nil { + s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PatternSeries) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.PatternSeries{") + s = append(s, "Pattern: "+fmt.Sprintf("%#v", this.Pattern)+",\n") + if this.Samples != nil { + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PatternSample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.PatternSample{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringPattern(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// PatternClient is the client API for Pattern service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PatternClient interface { + Push(ctx context.Context, in *push.PushRequest, opts ...grpc.CallOption) (*push.PushResponse, error) + Query(ctx context.Context, in *QueryPatternsRequest, opts ...grpc.CallOption) (Pattern_QueryClient, error) +} + +type patternClient struct { + cc *grpc.ClientConn +} + +func NewPatternClient(cc *grpc.ClientConn) PatternClient { + return &patternClient{cc} +} + +func (c *patternClient) Push(ctx context.Context, in *push.PushRequest, opts ...grpc.CallOption) (*push.PushResponse, error) { + out := new(push.PushResponse) + err := c.cc.Invoke(ctx, "/logproto.Pattern/Push", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *patternClient) Query(ctx context.Context, in *QueryPatternsRequest, opts ...grpc.CallOption) (Pattern_QueryClient, error) { + stream, err := c.cc.NewStream(ctx, &_Pattern_serviceDesc.Streams[0], "/logproto.Pattern/Query", opts...) + if err != nil { + return nil, err + } + x := &patternQueryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Pattern_QueryClient interface { + Recv() (*QueryPatternsResponse, error) + grpc.ClientStream +} + +type patternQueryClient struct { + grpc.ClientStream +} + +func (x *patternQueryClient) Recv() (*QueryPatternsResponse, error) { + m := new(QueryPatternsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// PatternServer is the server API for Pattern service. +type PatternServer interface { + Push(context.Context, *push.PushRequest) (*push.PushResponse, error) + Query(*QueryPatternsRequest, Pattern_QueryServer) error +} + +// UnimplementedPatternServer can be embedded to have forward compatible implementations. 
+type UnimplementedPatternServer struct { +} + +func (*UnimplementedPatternServer) Push(ctx context.Context, req *push.PushRequest) (*push.PushResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") +} +func (*UnimplementedPatternServer) Query(req *QueryPatternsRequest, srv Pattern_QueryServer) error { + return status.Errorf(codes.Unimplemented, "method Query not implemented") +} + +func RegisterPatternServer(s *grpc.Server, srv PatternServer) { + s.RegisterService(&_Pattern_serviceDesc, srv) +} + +func _Pattern_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(push.PushRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PatternServer).Push(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/logproto.Pattern/Push", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PatternServer).Push(ctx, req.(*push.PushRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Pattern_Query_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(QueryPatternsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(PatternServer).Query(m, &patternQueryServer{stream}) +} + +type Pattern_QueryServer interface { + Send(*QueryPatternsResponse) error + grpc.ServerStream +} + +type patternQueryServer struct { + grpc.ServerStream +} + +func (x *patternQueryServer) Send(m *QueryPatternsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Pattern_serviceDesc = grpc.ServiceDesc{ + ServiceName: "logproto.Pattern", + HandlerType: (*PatternServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Push", + Handler: _Pattern_Push_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Query", + Handler: _Pattern_Query_Handler, + ServerStreams: true, + }, + }, + Metadata: "pkg/logproto/pattern.proto", +} + +func (m *QueryPatternsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPatternsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPatternsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintPattern(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x1a + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintPattern(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x12 + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintPattern(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPatternsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPatternsResponse) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPatternsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Series) > 0 { + for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Series[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPattern(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PatternSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PatternSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatternSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPattern(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Pattern) > 0 { + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintPattern(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PatternSample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PatternSample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatternSample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = encodeVarintPattern(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if m.Timestamp != 0 { + i = encodeVarintPattern(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPattern(dAtA []byte, offset int, v uint64) int { + offset -= sovPattern(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryPatternsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovPattern(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovPattern(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovPattern(uint64(l)) + return n +} + +func (m *QueryPatternsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Series) > 0 { + for _, e := range m.Series { + l = e.Size() + n += 1 + l + sovPattern(uint64(l)) + } + } + return n +} + +func (m *PatternSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pattern) + if l > 0 { + n += 1 + l + sovPattern(uint64(l)) + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovPattern(uint64(l)) + } + } + return n +} + +func (m *PatternSample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovPattern(uint64(m.Timestamp)) + } + if m.Value != 0 { + n += 1 + sovPattern(uint64(m.Value)) + } + 
return n +} + +func sovPattern(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPattern(x uint64) (n int) { + return sovPattern(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *QueryPatternsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryPatternsRequest{`, + `Query:` + fmt.Sprintf("%v", this.Query) + `,`, + `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *QueryPatternsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForSeries := "[]*PatternSeries{" + for _, f := range this.Series { + repeatedStringForSeries += strings.Replace(f.String(), "PatternSeries", "PatternSeries", 1) + "," + } + repeatedStringForSeries += "}" + s := strings.Join([]string{`&QueryPatternsResponse{`, + `Series:` + repeatedStringForSeries + `,`, + `}`, + }, "") + return s +} +func (this *PatternSeries) String() string { + if this == nil { + return "nil" + } + repeatedStringForSamples := "[]*PatternSample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(f.String(), "PatternSample", "PatternSample", 1) + "," + } + repeatedStringForSamples += "}" + s := strings.Join([]string{`&PatternSeries{`, + `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, + `Samples:` + repeatedStringForSamples + `,`, + `}`, + }, "") + return s +} +func (this *PatternSample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PatternSample{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringPattern(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *QueryPatternsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPatternsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPatternsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPattern + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPattern + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPattern + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPattern + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPattern + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPattern + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPattern(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPatternsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPatternsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPatternsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPattern + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPattern + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Series = append(m.Series, &PatternSeries{}) + if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPattern(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PatternSeries) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PatternSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PatternSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPattern + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPattern + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPattern + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPattern + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &PatternSample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPattern(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PatternSample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PatternSample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PatternSample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= github_com_prometheus_common_model.Time(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPattern + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPattern(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPattern + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPattern(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPattern + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPattern + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPattern + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPattern + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthPattern + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPattern + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPattern(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthPattern + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPattern = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPattern = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/logproto/pattern.proto b/pkg/logproto/pattern.proto new file mode 100644 index 0000000000000..66a6c017b1926 --- /dev/null +++ b/pkg/logproto/pattern.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package logproto; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "pkg/logproto/logproto.proto"; +import "pkg/push/push.proto"; + +option go_package = "github.com/grafana/loki/v3/pkg/logproto"; + +service Pattern { + rpc Push(PushRequest) returns (PushResponse) {} + rpc Query(QueryPatternsRequest) returns (stream QueryPatternsResponse) {} +} + +message QueryPatternsRequest { + string query = 1; + google.protobuf.Timestamp start = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; 
+ google.protobuf.Timestamp end = 3 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false + ]; +} + +message QueryPatternsResponse { + repeated PatternSeries series = 1; +} + +message PatternSeries { + string pattern = 1; + repeated PatternSample samples = 2; +} + +message PatternSample { + int64 timestamp = 1 [ + (gogoproto.customtype) = "github.com/prometheus/common/model.Time", + (gogoproto.nullable) = false + ]; + int64 value = 2; +} diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index a0faed6312960..7353f797bdd1f 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -243,6 +243,21 @@ func applyConfigToRings(r, defaults *ConfigWrapper, rc lokiring.RingConfig, merg r.Ingester.LifecyclerConfig.ObservePeriod = rc.ObservePeriod } + if mergeWithExisting { + r.Pattern.LifecyclerConfig.RingConfig.KVStore = rc.KVStore + r.Pattern.LifecyclerConfig.HeartbeatPeriod = rc.HeartbeatPeriod + r.Pattern.LifecyclerConfig.RingConfig.HeartbeatTimeout = rc.HeartbeatTimeout + r.Pattern.LifecyclerConfig.TokensFilePath = rc.TokensFilePath + r.Pattern.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled = rc.ZoneAwarenessEnabled + r.Pattern.LifecyclerConfig.ID = rc.InstanceID + r.Pattern.LifecyclerConfig.InfNames = rc.InstanceInterfaceNames + r.Pattern.LifecyclerConfig.Port = rc.InstancePort + r.Pattern.LifecyclerConfig.Addr = rc.InstanceAddr + r.Pattern.LifecyclerConfig.Zone = rc.InstanceZone + r.Pattern.LifecyclerConfig.ListenPort = rc.ListenPort + r.Pattern.LifecyclerConfig.ObservePeriod = rc.ObservePeriod + } + // Distributor if mergeWithExisting || reflect.DeepEqual(r.Distributor.DistributorRing, defaults.Distributor.DistributorRing) { r.Distributor.DistributorRing.HeartbeatTimeout = rc.HeartbeatTimeout @@ -376,6 +391,13 @@ func applyTokensFilePath(cfg *ConfigWrapper) error { } cfg.BloomGateway.Ring.TokensFilePath = f + // Pattern + f, err = tokensFile(cfg, "pattern.tokens") + if err != nil { + return err + } + cfg.Pattern.LifecyclerConfig.TokensFilePath = f + return nil } @@ -430,6 +452,9 @@ func appendLoopbackInterface(cfg, defaults *ConfigWrapper) { if reflect.DeepEqual(cfg.Ingester.LifecyclerConfig.InfNames, defaults.Ingester.LifecyclerConfig.InfNames) { cfg.Ingester.LifecyclerConfig.InfNames = append(cfg.Ingester.LifecyclerConfig.InfNames, loopbackIface) } + if reflect.DeepEqual(cfg.Pattern.LifecyclerConfig.InfNames, defaults.Pattern.LifecyclerConfig.InfNames) { + cfg.Pattern.LifecyclerConfig.InfNames = append(cfg.Pattern.LifecyclerConfig.InfNames, loopbackIface) + } if reflect.DeepEqual(cfg.Frontend.FrontendV2.InfNames, defaults.Frontend.FrontendV2.InfNames) { cfg.Frontend.FrontendV2.InfNames = append(cfg.Config.Frontend.FrontendV2.InfNames, loopbackIface) @@ -474,6 +499,7 @@ func appendLoopbackInterface(cfg, defaults *ConfigWrapper) { // (for example, use consul for the distributor), it seems harmless to take a guess at better defaults here. 
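// The pattern ingester ring is included below for the same reason: any ring missing from this function would silently stay on its default KV store even when a memberlist configuration is present.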
func applyMemberlistConfig(r *ConfigWrapper) { r.Ingester.LifecyclerConfig.RingConfig.KVStore.Store = memberlistStr + r.Pattern.LifecyclerConfig.RingConfig.KVStore.Store = memberlistStr r.Distributor.DistributorRing.KVStore.Store = memberlistStr r.Ruler.Ring.KVStore.Store = memberlistStr r.QueryScheduler.SchedulerRing.KVStore.Store = memberlistStr diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index dc4a2bb08c4c4..c3ee72a7fc574 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -43,6 +43,7 @@ import ( "github.com/grafana/loki/v3/pkg/loki/common" "github.com/grafana/loki/v3/pkg/lokifrontend" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/transport" + "github.com/grafana/loki/v3/pkg/pattern" "github.com/grafana/loki/v3/pkg/querier" "github.com/grafana/loki/v3/pkg/querier/queryrange" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" @@ -87,6 +88,7 @@ type Config struct { Ruler ruler.Config `yaml:"ruler,omitempty"` IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"` Ingester ingester.Config `yaml:"ingester,omitempty"` + Pattern pattern.Config `yaml:"pattern_ingester,omitempty"` IndexGateway indexgateway.Config `yaml:"index_gateway"` BloomCompactor bloomcompactor.Config `yaml:"bloom_compactor"` BloomGateway bloomgateway.Config `yaml:"bloom_gateway"` @@ -184,6 +186,7 @@ func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { // but we can take values from throwaway flag set and reregister into supplied flags with new default values. c.Server.RegisterFlags(throwaway) c.InternalServer.RegisterFlags(throwaway) + c.Pattern.RegisterFlags(throwaway) throwaway.VisitAll(func(f *flag.Flag) { // Ignore errors when setting new values. We have a test to verify that it works. @@ -196,6 +199,9 @@ func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { case "server.http-listen-port": _ = f.Value.Set("3100") + + case "pattern-ingester.distributor.replication-factor": + _ = f.Value.Set("1") } fs.Var(f.Value, f.Name, f.Usage) @@ -264,6 +270,10 @@ func (c *Config) Validate() error { return errors.Wrap(err, "invalid bloom_compactor config") } + if err := c.Pattern.Validate(); err != nil { + return errors.Wrap(err, "invalid pattern_ingester config") + } + if err := ValidateConfigCompatibility(*c); err != nil { return err } @@ -345,6 +355,8 @@ type Loki struct { TenantLimits validation.TenantLimits distributor *distributor.Distributor Ingester ingester.Interface + PatternIngester *pattern.Ingester + PatternRingClient *pattern.RingClient Querier querier.Querier cacheGenerationLoader queryrangebase.CacheGenNumberLoader querierAPI *querier.QuerierAPI @@ -612,6 +624,15 @@ func (t *Loki) readyHandler(sm *services.Manager, shutdownRequested *atomic.Bool } } + // Pattern Ingester has a special check that makes sure that it was able to register into the ring, + // and that all other ring entries are OK too. 
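+ // Until CheckReady succeeds, /ready returns 503 Service Unavailable, so load balancers keep traffic away from this instance while the pattern ingester is still joining the ring.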
+ if t.PatternIngester != nil { + if err := t.PatternIngester.CheckReady(r.Context()); err != nil { + http.Error(w, "Pattern Ingester not ready: "+err.Error(), http.StatusServiceUnavailable) + return + } + } + // Query Frontend has a special check that makes sure that a querier is attached before it signals // itself as ready if t.frontend != nil { @@ -665,6 +686,8 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(QuerySchedulerRing, t.initQuerySchedulerRing, modules.UserInvisibleModule) mm.RegisterModule(Analytics, t.initAnalytics) mm.RegisterModule(CacheGenerationLoader, t.initCacheGenerationLoader) + mm.RegisterModule(PatternIngester, t.initPatternIngester) + mm.RegisterModule(PatternRingClient, t.initPatternRingClient, modules.UserInvisibleModule) mm.RegisterModule(All, nil) mm.RegisterModule(Read, nil) @@ -678,10 +701,10 @@ func (t *Loki) setupModuleManager() error { Overrides: {RuntimeConfig}, OverridesExporter: {Overrides, Server}, TenantConfigs: {RuntimeConfig}, - Distributor: {Ring, Server, Overrides, TenantConfigs, Analytics}, + Distributor: {Ring, Server, Overrides, TenantConfigs, PatternRingClient, Analytics}, Store: {Overrides, IndexGatewayRing}, Ingester: {Store, Server, MemberlistKV, TenantConfigs, Analytics}, - Querier: {Store, Ring, Server, IngesterQuerier, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing}, + Querier: {Store, Ring, Server, IngesterQuerier, PatternRingClient, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing}, QueryFrontendTripperware: {Server, Overrides, TenantConfigs}, QueryFrontend: {QueryFrontendTripperware, Analytics, CacheGenerationLoader, QuerySchedulerRing}, QueryScheduler: {Server, Overrides, MemberlistKV, Analytics, QuerySchedulerRing}, @@ -692,6 +715,8 @@ func (t *Loki) setupModuleManager() error { IndexGateway: {Server, Store, IndexGatewayRing, IndexGatewayInterceptors, Analytics}, BloomGateway: {Server, BloomStore, BloomGatewayRing, Analytics}, BloomCompactor: {Server, BloomStore, BloomCompactorRing, Analytics, Store}, + PatternIngester: {Server, MemberlistKV, Analytics}, + PatternRingClient: {Server, MemberlistKV, Analytics}, IngesterQuerier: {Ring}, QuerySchedulerRing: {Overrides, MemberlistKV}, IndexGatewayRing: {Overrides, MemberlistKV}, @@ -703,7 +728,7 @@ func (t *Loki) setupModuleManager() error { Write: {Ingester, Distributor}, Backend: {QueryScheduler, Ruler, Compactor, IndexGateway, BloomGateway, BloomCompactor}, - All: {QueryScheduler, QueryFrontend, Querier, Ingester, Distributor, Ruler, Compactor}, + All: {QueryScheduler, QueryFrontend, Querier, Ingester, PatternIngester, Distributor, Ruler, Compactor}, } if t.Cfg.Querier.PerRequestLimitsEnabled { @@ -779,7 +804,6 @@ func (t *Loki) setupModuleManager() error { a[idx] = InternalServer deps[key] = a } - } for mod, targets := range deps { diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 2f8e977be7fae..946f434637b21 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -51,6 +51,7 @@ import ( "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/transport" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/v1/frontendv1pb" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/v2/frontendv2pb" + "github.com/grafana/loki/v3/pkg/pattern" "github.com/grafana/loki/v3/pkg/querier" "github.com/grafana/loki/v3/pkg/querier/queryrange" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" @@ -96,6 +97,8 @@ const ( Querier string = "querier" CacheGenerationLoader string = "cache-generation-loader" Ingester string = 
"ingester" + PatternIngester string = "pattern-ingester" + PatternRingClient string = "pattern-ring-client" IngesterQuerier string = "ingester-querier" IngesterGRPCInterceptors string = "ingester-query-tags-interceptors" QueryFrontend string = "query-frontend" @@ -313,6 +316,14 @@ func (t *Loki) initTenantConfigs() (_ services.Service, err error) { } func (t *Loki) initDistributor() (services.Service, error) { + if t.Cfg.Pattern.Enabled { + patternTee, err := pattern.NewTee(t.Cfg.Pattern, t.PatternRingClient, t.Cfg.MetricsNamespace, prometheus.DefaultRegisterer, util_log.Logger) + if err != nil { + return nil, err + } + t.Tee = distributor.WrapTee(t.Tee, patternTee) + } + var err error logger := log.With(util_log.Logger, "component", "distributor") t.distributor, err = distributor.New( @@ -383,7 +394,13 @@ func (t *Loki) initQuerier() (services.Service, error) { if err != nil { return nil, err } - + if t.Cfg.Pattern.Enabled { + patternQuerier, err := pattern.NewIngesterQuerier(t.Cfg.Pattern, t.PatternRingClient, t.Cfg.MetricsNamespace, prometheus.DefaultRegisterer, util_log.Logger) + if err != nil { + return nil, err + } + q.WithPatternQuerier(patternQuerier) + } if t.Cfg.Querier.MultiTenantQueriesEnabled { t.Querier = querier.NewMultiTenantQuerier(q, util_log.Logger) } else { @@ -500,6 +517,7 @@ func (t *Loki) initQuerier() (services.Service, error) { router.Path("/loki/api/v1/index/shards").Methods("GET", "POST").Handler(indexShardsHTTPMiddleware.Wrap(httpHandler)) router.Path("/loki/api/v1/index/volume").Methods("GET", "POST").Handler(volumeHTTPMiddleware.Wrap(httpHandler)) router.Path("/loki/api/v1/index/volume_range").Methods("GET", "POST").Handler(volumeRangeHTTPMiddleware.Wrap(httpHandler)) + router.Path("/loki/api/v1/patterns").Methods("GET", "POST").Handler(httpHandler) router.Path("/api/prom/query").Methods("GET", "POST").Handler( middleware.Merge( @@ -594,6 +612,37 @@ func (t *Loki) initIngester() (_ services.Service, err error) { return t.Ingester, nil } +func (t *Loki) initPatternIngester() (_ services.Service, err error) { + if !t.Cfg.Pattern.Enabled { + return nil, nil + } + t.Cfg.Pattern.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort + t.PatternIngester, err = pattern.New(t.Cfg.Pattern, t.Cfg.MetricsNamespace, prometheus.DefaultRegisterer, util_log.Logger) + if err != nil { + return nil, err + } + logproto.RegisterPatternServer(t.Server.GRPC, t.PatternIngester) + + t.Server.HTTP.Path("/pattern/ring").Methods("GET", "POST").Handler(t.PatternIngester) + + if t.Cfg.InternalServer.Enable { + t.InternalServer.HTTP.Path("/pattern/ring").Methods("GET", "POST").Handler(t.PatternIngester) + } + return t.PatternIngester, nil +} + +func (t *Loki) initPatternRingClient() (_ services.Service, err error) { + if !t.Cfg.Pattern.Enabled { + return nil, nil + } + ringClient, err := pattern.NewRingClient(t.Cfg.Pattern, t.Cfg.MetricsNamespace, prometheus.DefaultRegisterer, util_log.Logger) + if err != nil { + return nil, err + } + t.PatternRingClient = ringClient + return ringClient, nil +} + func (t *Loki) initTableManager() (services.Service, error) { level.Warn(util_log.Logger).Log("msg", "table manager is deprecated. 
Consider migrating to tsdb index which relies on a compactor instead.") @@ -858,6 +907,7 @@ type ingesterQueryOptions struct { func (i ingesterQueryOptions) QueryStoreOnly() bool { return i.Config.QueryStoreOnly } + func (i ingesterQueryOptions) QueryIngestersWithin() time.Duration { return i.Config.QueryIngestersWithin } @@ -1037,6 +1087,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { t.Server.HTTP.Path("/loki/api/v1/label/{name}/values").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/series").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/detected_fields").Methods("GET", "POST").Handler(frontendHandler) + t.Server.HTTP.Path("/loki/api/v1/patterns").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/detected_labels").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/index/stats").Methods("GET", "POST").Handler(frontendHandler) t.Server.HTTP.Path("/loki/api/v1/index/shards").Methods("GET", "POST").Handler(frontendHandler) @@ -1127,7 +1178,6 @@ func (t *Loki) initRuler() (_ services.Service, err error) { t.Overrides, t.Cfg.MetricsNamespace, ) - if err != nil { return } @@ -1245,6 +1295,7 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.BloomGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.BloomCompactor.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV + t.Cfg.Pattern.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Server.HTTP.Handle("/memberlist", t.MemberlistKV) if t.Cfg.InternalServer.Enable { @@ -1446,7 +1497,6 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) { managerMode = lokiring.ServerMode } rm, err := lokiring.NewRingManager(indexGatewayRingKey, managerMode, t.Cfg.IndexGateway.Ring, t.Cfg.IndexGateway.Ring.ReplicationFactor, indexgateway.NumTokens, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { return nil, gerrors.Wrap(err, "new index gateway ring manager") } @@ -1543,7 +1593,6 @@ func (t *Loki) initQuerySchedulerRing() (_ services.Service, err error) { managerMode = lokiring.ServerMode } rm, err := lokiring.NewRingManager(schedulerRingKey, managerMode, t.Cfg.QueryScheduler.SchedulerRing, scheduler.ReplicationFactor, scheduler.NumTokens, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { return nil, gerrors.Wrap(err, "new scheduler ring manager") } diff --git a/pkg/pattern/clientpool/client.go b/pkg/pattern/clientpool/client.go new file mode 100644 index 0000000000000..f28623a3e9d0b --- /dev/null +++ b/pkg/pattern/clientpool/client.go @@ -0,0 +1,104 @@ +package clientpool + +import ( + "flag" + "io" + "time" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/util/server" + + "github.com/grafana/dskit/grpcclient" + "github.com/grafana/dskit/middleware" + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +var ingesterClientRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "loki_pattern_ingester_client_request_duration_seconds", + Help: "Time spent doing pattern Ingester requests.", + Buckets: 
prometheus.ExponentialBuckets(0.001, 4, 6), +}, []string{"operation", "status_code"}) + +type HealthAndIngesterClient interface { + grpc_health_v1.HealthClient + Close() error +} + +type ClosableHealthAndIngesterClient struct { + logproto.PatternClient + grpc_health_v1.HealthClient + io.Closer +} + +// Config for an ingester client. +type Config struct { + PoolConfig PoolConfig `yaml:"pool_config,omitempty" doc:"description=Configures how connections are pooled."` + RemoteTimeout time.Duration `yaml:"remote_timeout,omitempty"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures how the gRPC connection to ingesters work as a client."` + GRPCUnaryClientInterceptors []grpc.UnaryClientInterceptor `yaml:"-"` + GRCPStreamClientInterceptors []grpc.StreamClientInterceptor `yaml:"-"` + + // Internal is used to indicate that this client communicates on behalf of + // a machine and not a user. When Internal = true, the client won't attempt + // to inject an userid into the context. + Internal bool `yaml:"-"` +} + +// RegisterFlags registers flags. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("pattern-ingester.client", f) + cfg.PoolConfig.RegisterFlagsWithPrefix("pattern-ingester.", f) + + f.DurationVar(&cfg.PoolConfig.RemoteTimeout, "pattern-ingester.client.healthcheck-timeout", 1*time.Second, "How quickly a dead client will be removed after it has been detected to disappear. Set this to a value to allow time for a secondary health check to recover the missing client.") + f.DurationVar(&cfg.RemoteTimeout, "pattern-ingester.client.timeout", 5*time.Second, "The remote request timeout on the client side.") +} + +// New returns a new ingester client. +func NewClient(cfg Config, addr string) (HealthAndIngesterClient, error) { + opts := []grpc.DialOption{ + grpc.WithDefaultCallOptions(cfg.GRPCClientConfig.CallOptions()...), + } + + dialOpts, err := cfg.GRPCClientConfig.DialOption(instrumentation(&cfg)) + if err != nil { + return nil, err + } + + opts = append(opts, dialOpts...) + conn, err := grpc.Dial(addr, opts...) + if err != nil { + return nil, err + } + return ClosableHealthAndIngesterClient{ + PatternClient: logproto.NewPatternClient(conn), + HealthClient: grpc_health_v1.NewHealthClient(conn), + Closer: conn, + }, nil +} + +func instrumentation(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { + var unaryInterceptors []grpc.UnaryClientInterceptor + unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...) + unaryInterceptors = append(unaryInterceptors, server.UnaryClientQueryTagsInterceptor) + unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer())) + if !cfg.Internal { + unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor) + } + unaryInterceptors = append(unaryInterceptors, middleware.UnaryClientInstrumentInterceptor(ingesterClientRequestDuration)) + + var streamInterceptors []grpc.StreamClientInterceptor + streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...) 
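+ // Ordering note: as with the unary chain above, the instrumentation interceptor is appended last, making it the innermost wrapper, so the duration it records covers the RPC itself rather than the interceptors around it.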
+ streamInterceptors = append(streamInterceptors, server.StreamClientQueryTagsInterceptor) + streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer())) + if !cfg.Internal { + streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor) + } + streamInterceptors = append(streamInterceptors, middleware.StreamClientInstrumentInterceptor(ingesterClientRequestDuration)) + + return unaryInterceptors, streamInterceptors +} diff --git a/pkg/pattern/clientpool/ingester_client_pool.go b/pkg/pattern/clientpool/ingester_client_pool.go new file mode 100644 index 0000000000000..c8ebdc3e548a4 --- /dev/null +++ b/pkg/pattern/clientpool/ingester_client_pool.go @@ -0,0 +1,46 @@ +package clientpool + +import ( + "flag" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/ring" + ring_client "github.com/grafana/dskit/ring/client" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var clients prometheus.Gauge + +// PoolConfig is config for creating a Pool. +type PoolConfig struct { + ClientCleanupPeriod time.Duration `yaml:"client_cleanup_period"` + HealthCheckIngesters bool `yaml:"health_check_ingesters"` + RemoteTimeout time.Duration `yaml:"remote_timeout"` +} + +// RegisterFlagsWithPrefix adds the flags required to configure this to the given FlagSet. +func (cfg *PoolConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.DurationVar(&cfg.ClientCleanupPeriod, prefix+"client-cleanup-period", 15*time.Second, "How frequently to clean up clients for ingesters that have gone away.") + f.BoolVar(&cfg.HealthCheckIngesters, prefix+"health-check-ingesters", true, "Run a health check on each ingester client during periodic cleanup.") + f.DurationVar(&cfg.RemoteTimeout, prefix+"remote-timeout", 1*time.Second, "Timeout for the health check.") +} + +func NewPool(name string, cfg PoolConfig, ring ring.ReadRing, factory ring_client.PoolFactory, logger log.Logger, metricsNamespace string) *ring_client.Pool { + poolCfg := ring_client.PoolConfig{ + CheckInterval: cfg.ClientCleanupPeriod, + HealthCheckEnabled: cfg.HealthCheckIngesters, + HealthCheckTimeout: cfg.RemoteTimeout, + } + + if clients == nil { + clients = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Name: "pattern_ingester_clients", + Help: "The current number of pattern ingester clients.", + }) + } + // TODO(chaudum): Allow configuration of metric name by the caller. + return ring_client.NewPool(name, poolCfg, ring_client.NewRingServiceDiscovery(ring), factory, clients, logger) +} diff --git a/pkg/pattern/drain/README.md b/pkg/pattern/drain/README.md new file mode 100644 index 0000000000000..dc2577c9d7e90 --- /dev/null +++ b/pkg/pattern/drain/README.md @@ -0,0 +1 @@ +Fork of https://github.com/faceair/drain with minor adjustments.
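The drain fork recorded in the README above is the core of the pattern ingester: it turns raw log lines into pattern clusters. As a review aid, here is a minimal sketch of how this package's API (as added in this diff) is meant to be used; the sample log lines and the main function are invented for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/grafana/loki/v3/pkg/pattern/drain"
)

func main() {
	// Defaults per this diff: tree depth 8, similarity threshold 0.3,
	// and "<_>" as the placeholder token.
	d := drain.New(drain.DefaultConfig())

	// Drain tokenizes on spaces and merges lines whose token sequences
	// are similar enough.
	lines := []string{
		"level=info msg=connecting addr=10.0.0.1",
		"level=info msg=connecting addr=10.0.0.2",
		"level=info msg=connecting addr=10.0.0.3",
	}
	for _, line := range lines {
		d.Train(line, time.Now().UnixNano())
	}

	// Tokens that differ between lines collapse into the placeholder, so
	// this prints something like: level=info msg=connecting addr=<_>
	for _, cluster := range d.Clusters() {
		fmt.Println(d.PatternString(cluster))
	}
}

Each cluster also accumulates per-10-second occurrence counts into the chunks defined in chunk.go below; those counts are what the Query RPC ultimately streams back as PatternSamples.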
diff --git a/pkg/pattern/drain/chunk.go b/pkg/pattern/drain/chunk.go new file mode 100644 index 0000000000000..e438bb6b7c561 --- /dev/null +++ b/pkg/pattern/drain/chunk.go @@ -0,0 +1,176 @@ +package drain + +import ( + "sort" + "time" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/pattern/iter" +) + +const ( + timeResolution = model.Time(int64(time.Second*10) / 1e6) + + defaultVolumeSize = 500 + + maxChunkTime = 1 * time.Hour +) + +type Chunks []Chunk + +type Chunk struct { + Samples []logproto.PatternSample +} + +func newChunk(ts model.Time) Chunk { + maxSize := int(maxChunkTime.Nanoseconds()/timeResolution.UnixNano()) + 1 + v := Chunk{Samples: make([]logproto.PatternSample, 1, maxSize)} + v.Samples[0] = logproto.PatternSample{ + Timestamp: ts, + Value: 1, + } + return v +} + +func (c Chunk) spaceFor(ts model.Time) bool { + if len(c.Samples) == 0 { + return true + } + + return ts.Sub(c.Samples[0].Timestamp) < maxChunkTime +} + +// ForRange returns samples with only the values +// in the given range [start:end). +// start and end are in milliseconds since epoch. +func (c Chunk) ForRange(start, end model.Time) []logproto.PatternSample { + if len(c.Samples) == 0 { + return nil + } + first := c.Samples[0].Timestamp + last := c.Samples[len(c.Samples)-1].Timestamp + if start >= end || first >= end || last < start { + return nil + } + var lo int + if start > first { + lo = sort.Search(len(c.Samples), func(i int) bool { + return c.Samples[i].Timestamp >= start + }) + } + hi := len(c.Samples) + if end < last { + hi = sort.Search(len(c.Samples), func(i int) bool { + return c.Samples[i].Timestamp >= end + }) + } + return c.Samples[lo:hi] +} + +func (c *Chunks) Add(ts model.Time) { + t := truncateTimestamp(ts) + + if len(*c) == 0 { + *c = append(*c, newChunk(t)) + return + } + last := &(*c)[len(*c)-1] + if last.Samples[len(last.Samples)-1].Timestamp == t { + last.Samples[len(last.Samples)-1].Value++ + return + } + if !last.spaceFor(t) { + *c = append(*c, newChunk(t)) + return + } + last.Samples = append(last.Samples, logproto.PatternSample{ + Timestamp: t, + Value: 1, + }) +} + +func (c Chunks) Iterator(pattern string, from, through model.Time) iter.Iterator { + iters := make([]iter.Iterator, 0, len(c)) + for _, chunk := range c { + samples := chunk.ForRange(from, through) + if len(samples) == 0 { + continue + } + iters = append(iters, iter.NewSlice(pattern, samples)) + } + return iter.NewNonOverlappingIterator(pattern, iters) +} + +func (c Chunks) samples() []*logproto.PatternSample { + // TODO: []*logproto.PatternSample -> []logproto.PatternSample + // Or consider AoS to SoA conversion. + totalSample := 0 + for i := range c { + totalSample += len(c[i].Samples) + } + s := make([]*logproto.PatternSample, 0, totalSample) + for _, chunk := range c { + for i := range chunk.Samples { + s = append(s, &chunk.Samples[i]) + } + } + return s +} + +func (c *Chunks) merge(samples []*logproto.PatternSample) []logproto.PatternSample { + toMerge := c.samples() + // TODO: Avoid allocating a new slice, if possible. 
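+ // Two-pointer merge of two timestamp-sorted slices: samples that share a timestamp are summed into a single entry, all others are copied through in order.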
+ result := make([]logproto.PatternSample, 0, len(toMerge)+len(samples)) + var i, j int + for i < len(toMerge) && j < len(samples) { + if toMerge[i].Timestamp < samples[j].Timestamp { + result = append(result, *toMerge[i]) + i++ + } else if toMerge[i].Timestamp > samples[j].Timestamp { + result = append(result, *samples[j]) + j++ + } else { + result = append(result, logproto.PatternSample{ + Value: toMerge[i].Value + samples[j].Value, + Timestamp: toMerge[i].Timestamp, + }) + i++ + j++ + } + } + for ; i < len(toMerge); i++ { + result = append(result, *toMerge[i]) + } + for ; j < len(samples); j++ { + result = append(result, *samples[j]) + } + *c = Chunks{Chunk{Samples: result}} + return result +} + +func (c *Chunks) prune(olderThan time.Duration) { + if len(*c) == 0 { + return + } + // go for every chunks, check the last timestamp is after duration from now and remove the chunk + for i := 0; i < len(*c); i++ { + if time.Since((*c)[i].Samples[len((*c)[i].Samples)-1].Timestamp.Time()) > olderThan { + *c = append((*c)[:i], (*c)[i+1:]...) + i-- + } + } +} + +func (c *Chunks) size() int { + size := 0 + for _, chunk := range *c { + for _, sample := range chunk.Samples { + size += int(sample.Value) + } + } + return size +} + +func truncateTimestamp(ts model.Time) model.Time { return ts - ts%timeResolution } diff --git a/pkg/pattern/drain/chunk_test.go b/pkg/pattern/drain/chunk_test.go new file mode 100644 index 0000000000000..e75c70411a7be --- /dev/null +++ b/pkg/pattern/drain/chunk_test.go @@ -0,0 +1,296 @@ +package drain + +import ( + "reflect" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func TestAdd(t *testing.T) { + cks := Chunks{} + cks.Add(timeResolution + 1) + cks.Add(timeResolution + 2) + cks.Add(2*timeResolution + 1) + require.Equal(t, 1, len(cks)) + require.Equal(t, 2, len(cks[0].Samples)) + cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + timeResolution + 1) + require.Equal(t, 2, len(cks)) + require.Equal(t, 1, len(cks[1].Samples)) +} + +func TestIterator(t *testing.T) { + cks := Chunks{} + cks.Add(timeResolution + 1) + cks.Add(timeResolution + 2) + cks.Add(2*timeResolution + 1) + cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + timeResolution + 1) + + it := cks.Iterator("test", model.Time(0), model.Time(time.Hour.Nanoseconds())) + require.NotNil(t, it) + + var samples []logproto.PatternSample + for it.Next() { + samples = append(samples, it.At()) + } + require.NoError(t, it.Close()) + require.Equal(t, 3, len(samples)) + require.Equal(t, []logproto.PatternSample{ + {Timestamp: 10000, Value: 2}, + {Timestamp: 20000, Value: 1}, + {Timestamp: 3610000, Value: 1}, + }, samples) +} + +func TestForRange(t *testing.T) { + testCases := []struct { + name string + c *Chunk + start model.Time + end model.Time + expected []logproto.PatternSample + }{ + { + name: "Empty Volume", + c: &Chunk{}, + start: 1, + end: 10, + expected: nil, + }, + { + name: "No Overlap", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 10, + end: 20, + expected: nil, + }, + { + name: "Complete Overlap", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 0, + end: 10, + expected: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }, + }, + { + name: 
"Partial Overlap", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 2, + end: 4, + expected: []logproto.PatternSample{{Timestamp: 3, Value: 4}}, + }, + { + name: "Single Element in Range", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 3, + end: 4, + expected: []logproto.PatternSample{{Timestamp: 3, Value: 4}}, + }, + { + name: "Start Before First Element", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 0, + end: 4, + expected: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + }, + }, + { + name: "End After Last Element", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 4, + end: 10, + expected: []logproto.PatternSample{ + {Timestamp: 5, Value: 6}, + }, + }, + { + name: "Start and End Before First Element", + c: &Chunk{Samples: []logproto.PatternSample{ + {Timestamp: 1, Value: 2}, + {Timestamp: 3, Value: 4}, + {Timestamp: 5, Value: 6}, + }}, + start: 0, + end: 1, + expected: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := tc.c.ForRange(tc.start, tc.end) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Expected %v, got %v", tc.expected, result) + } + }) + } +} + +func TestMerge(t *testing.T) { + tests := []struct { + x Chunks + samples []*logproto.PatternSample + expected []logproto.PatternSample + }{ + { + x: Chunks{ + Chunk{ + Samples: []logproto.PatternSample{ + {Value: 10, Timestamp: 1}, + {Value: 20, Timestamp: 2}, + {Value: 30, Timestamp: 4}, + }, + }, + }, + samples: []*logproto.PatternSample{ + {Value: 5, Timestamp: 1}, + {Value: 15, Timestamp: 3}, + {Value: 25, Timestamp: 4}, + }, + expected: []logproto.PatternSample{ + {Value: 15, Timestamp: 1}, + {Value: 20, Timestamp: 2}, + {Value: 15, Timestamp: 3}, + {Value: 55, Timestamp: 4}, + }, + }, + { + x: Chunks{ + Chunk{ + Samples: []logproto.PatternSample{ + {Value: 5, Timestamp: 1}, + {Value: 15, Timestamp: 3}, + {Value: 25, Timestamp: 4}, + }, + }, + }, + samples: []*logproto.PatternSample{ + {Value: 10, Timestamp: 1}, + {Value: 20, Timestamp: 2}, + {Value: 30, Timestamp: 4}, + }, + expected: []logproto.PatternSample{ + {Value: 15, Timestamp: 1}, + {Value: 20, Timestamp: 2}, + {Value: 15, Timestamp: 3}, + {Value: 55, Timestamp: 4}, + }, + }, + { + x: Chunks{ + Chunk{ + Samples: []logproto.PatternSample{ + {Value: 10, Timestamp: 1}, + {Value: 20, Timestamp: 2}, + {Value: 30, Timestamp: 4}, + }, + }, + }, + samples: []*logproto.PatternSample{}, + expected: []logproto.PatternSample{ + {Value: 10, Timestamp: 1}, + {Value: 20, Timestamp: 2}, + {Value: 30, Timestamp: 4}, + }, + }, + } + + for _, test := range tests { + result := test.x.merge(test.samples) + if !reflect.DeepEqual(result, test.expected) { + t.Errorf("Expected: %v, Got: %v", test.expected, result) + } + } +} + +func TestPrune(t *testing.T) { + olderThan := time.Hour * 3 + + t.Run("Empty Chunks", func(t *testing.T) { + cks := Chunks{} + cks.prune(olderThan) + require.Empty(t, cks) + }) + + t.Run("No Pruning", func(t *testing.T) { + cks := Chunks{ + Chunk{ + Samples: []logproto.PatternSample{ + {Timestamp: model.TimeFromUnixNano(time.Now().UnixNano() - (olderThan.Nanoseconds()) + (1 * 
time.Minute).Nanoseconds())}, + {Timestamp: model.TimeFromUnixNano(time.Now().UnixNano() - (olderThan.Nanoseconds()) + (2 * time.Minute).Nanoseconds())}, + }, + }, + } + cks.prune(olderThan) + require.Len(t, cks, 1) + }) + + now := time.Now() + t.Run("Pruning", func(t *testing.T) { + cks := Chunks{ + Chunk{ + Samples: []logproto.PatternSample{ + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) - (1 * time.Minute).Nanoseconds())}, + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) - (2 * time.Minute).Nanoseconds())}, + }, + }, + Chunk{ + Samples: []logproto.PatternSample{ + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) - (1 * time.Minute).Nanoseconds())}, + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) - (2 * time.Minute).Nanoseconds())}, + }, + }, + Chunk{ + Samples: []logproto.PatternSample{ + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) + (1 * time.Minute).Nanoseconds())}, + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) + (2 * time.Minute).Nanoseconds())}, + }, + }, + Chunk{ + Samples: []logproto.PatternSample{ + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) - (1 * time.Minute).Nanoseconds())}, + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) - (2 * time.Minute).Nanoseconds())}, + }, + }, + } + cks.prune(olderThan) + require.Len(t, cks, 1) + require.Equal(t, []logproto.PatternSample{ + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) + (1 * time.Minute).Nanoseconds())}, + {Timestamp: model.TimeFromUnixNano(now.UnixNano() - (olderThan.Nanoseconds()) + (2 * time.Minute).Nanoseconds())}, + }, cks[0].Samples) + }) +} diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go new file mode 100644 index 0000000000000..eb82085d6f797 --- /dev/null +++ b/pkg/pattern/drain/drain.go @@ -0,0 +1,441 @@ +// MIT License +// +// Copyright (c) 2022 faceair +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
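+// +// This file is a fork of github.com/faceair/drain (see the README in this package). Drain routes tokenized log lines through a fixed-depth prefix tree; lines that reach the same leaf with sufficient token similarity are merged into a cluster, and tokens that differ between them become the configured placeholder.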
+ +package drain + +import ( + "math" + "strconv" + "strings" + "unicode" + + "github.com/hashicorp/golang-lru/v2/simplelru" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +type Config struct { + maxNodeDepth int + LogClusterDepth int + SimTh float64 + MaxChildren int + ExtraDelimiters []string + MaxClusters int + ParamString string +} + +func createLogClusterCache(maxSize int) *LogClusterCache { + if maxSize == 0 { + maxSize = math.MaxInt + } + cache, _ := simplelru.NewLRU[int, *LogCluster](maxSize, nil) + return &LogClusterCache{ + cache: cache, + } +} + +type LogClusterCache struct { + cache simplelru.LRUCache[int, *LogCluster] +} + +func (c *LogClusterCache) Values() []*LogCluster { + values := make([]*LogCluster, 0) + for _, key := range c.cache.Keys() { + if value, ok := c.cache.Peek(key); ok { + values = append(values, value) + } + } + return values +} + +func (c *LogClusterCache) Set(key int, cluster *LogCluster) { + c.cache.Add(key, cluster) +} + +func (c *LogClusterCache) Iterate(fn func(*LogCluster) bool) { + for _, key := range c.cache.Keys() { + if value, ok := c.cache.Peek(key); ok { + if !fn(value) { + return + } + } + } +} + +func (c *LogClusterCache) Get(key int) *LogCluster { + cluster, ok := c.cache.Get(key) + if !ok { + return nil + } + return cluster +} + +func createNode() *Node { + return &Node{ + keyToChildNode: make(map[string]*Node), + clusterIDs: make([]int, 0), + } +} + +type Node struct { + keyToChildNode map[string]*Node + clusterIDs []int +} + +func DefaultConfig() *Config { + return &Config{ + LogClusterDepth: 8, + SimTh: 0.3, + MaxChildren: 100, + ParamString: "<_>", + MaxClusters: 0, + } +} + +func New(config *Config) *Drain { + if config.LogClusterDepth < 3 { + panic("depth argument must be at least 3") + } + config.maxNodeDepth = config.LogClusterDepth - 2 + + d := &Drain{ + config: config, + rootNode: createNode(), + idToCluster: createLogClusterCache(config.MaxClusters), + } + return d +} + +type Drain struct { + config *Config + rootNode *Node + idToCluster *LogClusterCache + clustersCounter int +} + +func (d *Drain) Clusters() []*LogCluster { + return d.idToCluster.Values() +} + +func (d *Drain) TrainTokens(tokens []string, stringer func([]string) string, ts int64) *LogCluster { + return d.train(tokens, stringer, ts) +} + +func (d *Drain) Train(content string, ts int64) *LogCluster { + return d.train(d.getContentAsTokens(content), nil, ts) +} + +func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64) *LogCluster { + matchCluster := d.treeSearch(d.rootNode, tokens, d.config.SimTh, false) + // Match no existing log cluster + if matchCluster == nil { + d.clustersCounter++ + clusterID := d.clustersCounter + matchCluster = &LogCluster{ + Tokens: tokens, + id: clusterID, + Size: 1, + Stringer: stringer, + Chunks: Chunks{}, + } + matchCluster.append(model.TimeFromUnixNano(ts)) + d.idToCluster.Set(clusterID, matchCluster) + d.addSeqToPrefixTree(d.rootNode, matchCluster) + } else { + newTemplateTokens := d.createTemplate(tokens, matchCluster.Tokens) + matchCluster.Tokens = newTemplateTokens + matchCluster.append(model.TimeFromUnixNano(ts)) + // Touch cluster to update its state in the cache. 
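+ // (Get marks the entry as recently used in the LRU, so clusters that keep matching are the last to be evicted when MaxClusters caps the cache.)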
+ d.idToCluster.Get(matchCluster.id) + } + return matchCluster +} + +func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample) *LogCluster { + tokens := tokenizePattern(content, d.config.ParamString) + matchCluster := d.treeSearch(d.rootNode, tokens, d.config.SimTh, false) + // Match no existing log cluster + if matchCluster == nil { + d.clustersCounter++ + clusterID := d.clustersCounter + matchCluster = &LogCluster{ + Tokens: tokens, + id: clusterID, + } + d.idToCluster.Set(clusterID, matchCluster) + d.addSeqToPrefixTree(d.rootNode, matchCluster) + } else { + newTemplateTokens := d.createTemplate(tokens, matchCluster.Tokens) + matchCluster.Tokens = newTemplateTokens + // Touch cluster to update its state in the cache. + d.idToCluster.Get(matchCluster.id) + } + matchCluster.merge(samples) + return matchCluster +} + +func tokenizePattern(content, param string) []string { + return deduplicatePlaceholders(strings.Split(content, " "), param) +} + +func deduplicatePlaceholders(tokens []string, param string) []string { + if len(tokens) < 2 { + return tokens + } + i := 1 + for k := 1; k < len(tokens); k++ { + if tokens[k] != param || tokens[k] != tokens[k-1] { + if i != k { + tokens[i] = tokens[k] + } + i++ + } + } + return tokens[:i] +} + +func (d *Drain) PatternString(c *LogCluster) string { + s := strings.Join(deduplicatePlaceholders(c.Tokens, d.config.ParamString), " ") + if s == d.config.ParamString { + return "" + } + return s +} + +func (d *Drain) Delete(cluster *LogCluster) { + d.idToCluster.cache.Remove(cluster.id) +} + +// Match against an already existing cluster. Match shall be perfect (sim_th=1.0). New cluster will not be created as a result of this call, nor any cluster modifications. +func (d *Drain) Match(content string) *LogCluster { + contentTokens := d.getContentAsTokens(content) + matchCluster := d.treeSearch(d.rootNode, contentTokens, 1.0, true) + return matchCluster +} + +func (d *Drain) getContentAsTokens(content string) []string { + content = strings.TrimSpace(content) + for _, extraDelimiter := range d.config.ExtraDelimiters { + content = strings.Replace(content, extraDelimiter, " ", -1) + } + return strings.Split(content, " ") +} + +func (d *Drain) treeSearch(rootNode *Node, tokens []string, simTh float64, includeParams bool) *LogCluster { + tokenCount := len(tokens) + + // at first level, children are grouped by token (word) count + curNode, ok := rootNode.keyToChildNode[strconv.Itoa(tokenCount)] + + // no template with same token count yet + if !ok { + return nil + } + + // handle case of empty log string - return the single cluster in that group + if tokenCount < 2 { + return d.idToCluster.Get(curNode.clusterIDs[0]) + } + + // find the leaf node for this log - a path of nodes matching the first N tokens (N=tree depth) + curNodeDepth := 1 + for _, token := range tokens { + // at max depth + if curNodeDepth >= d.config.maxNodeDepth { + break + } + + // this is last token + if curNodeDepth == tokenCount { + break + } + + keyToChildNode := curNode.keyToChildNode + curNode, ok = keyToChildNode[token] + if !ok { // no exact next token exist, try wildcard node + curNode, ok = keyToChildNode[d.config.ParamString] + } + if !ok { // no wildcard node exist + return nil + } + curNodeDepth++ + } + + // get best match among all clusters with same prefix, or None if no match is above sim_th + cluster := d.fastMatch(curNode.clusterIDs, tokens, simTh, includeParams) + return cluster +} + +// fastMatch Find the best match for a log message (represented as tokens) 
versus a list of clusters +func (d *Drain) fastMatch(clusterIDs []int, tokens []string, simTh float64, includeParams bool) *LogCluster { + var matchCluster, maxCluster *LogCluster + + maxSim := -1.0 + maxParamCount := -1 + for _, clusterID := range clusterIDs { + // Try to retrieve the cluster from the cache, bypassing the eviction + // algorithm, as we are only testing candidates for a match. + cluster := d.idToCluster.Get(clusterID) + if cluster == nil { + continue + } + curSim, paramCount := d.getSeqDistance(cluster.Tokens, tokens, includeParams) + if paramCount < 0 { + continue + } + if curSim > maxSim || (curSim == maxSim && paramCount > maxParamCount) { + maxSim = curSim + maxParamCount = paramCount + maxCluster = cluster + } + } + if maxSim >= simTh { + matchCluster = maxCluster + } + return matchCluster +} + +func (d *Drain) getSeqDistance(clusterTokens, tokens []string, includeParams bool) (float64, int) { + if len(clusterTokens) != len(tokens) { + panic("seq1 and seq2 must be of the same length") + } + + simTokens := 0 + paramCount := 0 + for i := range clusterTokens { + token1 := clusterTokens[i] + token2 := tokens[i] + // Require exact match for marked tokens + if len(token1) > 0 && token1[0] == 0 && token1 != token2 { + return 0, -1 + } + if token1 == d.config.ParamString { + paramCount++ + } else if token1 == token2 { + simTokens++ + } + } + if includeParams { + simTokens += paramCount + } + retVal := float64(simTokens) / float64(len(clusterTokens)) + return retVal, paramCount +}
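The similarity metric above is easiest to see with a worked example. Below is a minimal, self-contained sketch of the matching/total ratio that `fastMatch` and `getSeqDistance` compute, with the wildcard counted as a match when `includeParams` is true; the function and variable names are illustrative, not the production API.

```go
package main

import "fmt"

// seqSimilarity is a self-contained sketch of the metric computed by
// fastMatch/getSeqDistance above: the ratio of matching tokens to total
// tokens, where the wildcard token counts as a match only when
// includeParams is true. Names are illustrative, not the production API.
func seqSimilarity(clusterTokens, tokens []string, param string, includeParams bool) float64 {
	if len(clusterTokens) != len(tokens) {
		return 0 // drain groups clusters by token count, so lengths always agree
	}
	simTokens := 0
	for i := range clusterTokens {
		switch {
		case includeParams && clusterTokens[i] == param:
			simTokens++ // wildcard position, counted when params are included
		case clusterTokens[i] == tokens[i]:
			simTokens++ // exact token match
		}
	}
	return float64(simTokens) / float64(len(tokens))
}

func main() {
	sim := seqSimilarity(
		[]string{"foo", "<_>", "bar", "fred"},
		[]string{"foo", "baz", "bar", "qux"},
		"<_>", true)
	fmt.Printf("similarity=%.2f\n", sim) // 0.75: foo, <_>, bar match; fred/qux does not
}
```

+ +func (d *Drain) addSeqToPrefixTree(rootNode *Node, cluster *LogCluster) { + tokenCount := len(cluster.Tokens) + tokenCountStr := strconv.Itoa(tokenCount) + + firstLayerNode, ok := rootNode.keyToChildNode[tokenCountStr] + if !ok { + firstLayerNode = createNode() + rootNode.keyToChildNode[tokenCountStr] = firstLayerNode + } + curNode := firstLayerNode + + // handle case of empty log string + if tokenCount == 0 { + curNode.clusterIDs = append(curNode.clusterIDs, cluster.id) + return + } + + currentDepth := 1 + for _, token := range cluster.Tokens { + // if at max depth or this is last token in template - add current log cluster to the leaf node + if (currentDepth >= d.config.maxNodeDepth) || currentDepth >= tokenCount { + // clean up stale clusters before adding a new one. + newClusterIDs := make([]int, 0, len(curNode.clusterIDs)) + for _, clusterID := range curNode.clusterIDs { + if d.idToCluster.Get(clusterID) != nil { + newClusterIDs = append(newClusterIDs, clusterID) + } + } + newClusterIDs = append(newClusterIDs, cluster.id) + curNode.clusterIDs = newClusterIDs + break + } + + // if token not matched in this layer of existing tree.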
+ if _, ok = curNode.keyToChildNode[token]; !ok { + if !d.hasNumbers(token) { + if _, ok = curNode.keyToChildNode[d.config.ParamString]; ok { + if len(curNode.keyToChildNode) < d.config.MaxChildren { + newNode := createNode() + curNode.keyToChildNode[token] = newNode + curNode = newNode + } else { + curNode = curNode.keyToChildNode[d.config.ParamString] + } + } else { + if len(curNode.keyToChildNode)+1 < d.config.MaxChildren { + newNode := createNode() + curNode.keyToChildNode[token] = newNode + curNode = newNode + } else if len(curNode.keyToChildNode)+1 == d.config.MaxChildren { + newNode := createNode() + curNode.keyToChildNode[d.config.ParamString] = newNode + curNode = newNode + } else { + curNode = curNode.keyToChildNode[d.config.ParamString] + } + } + } else { + if _, ok = curNode.keyToChildNode[d.config.ParamString]; !ok { + newNode := createNode() + curNode.keyToChildNode[d.config.ParamString] = newNode + curNode = newNode + } else { + curNode = curNode.keyToChildNode[d.config.ParamString] + } + } + } else { + // if the token is matched + curNode = curNode.keyToChildNode[token] + } + + currentDepth++ + } +} + +func (d *Drain) hasNumbers(s string) bool { + for _, c := range s { + if unicode.IsNumber(c) { + return true + } + } + return false +} + +func (d *Drain) createTemplate(tokens, matchClusterTokens []string) []string { + if len(tokens) != len(matchClusterTokens) { + panic("seq1 and seq2 must be of the same length") + } + retVal := make([]string, len(matchClusterTokens)) + copy(retVal, matchClusterTokens) + for i := range tokens { + if tokens[i] != matchClusterTokens[i] { + retVal[i] = d.config.ParamString + } + } + return retVal +} diff --git a/pkg/pattern/drain/drain_test.go new file mode 100644 index 0000000000000..74b1e76ede126 --- /dev/null +++ b/pkg/pattern/drain/drain_test.go @@ -0,0 +1 @@ +package drain diff --git a/pkg/pattern/drain/log_cluster.go new file mode 100644 index 0000000000000..26dda97a7a16d --- /dev/null +++ b/pkg/pattern/drain/log_cluster.go @@ -0,0 +1,57 @@ +package drain + +import ( + "strings" + "time" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/pattern/iter" +) + +type LogCluster struct { + id int + Size int + Tokens []string + Stringer func([]string) string + Chunks Chunks +} + +func (c *LogCluster) String() string { + if c.Stringer != nil { + return c.Stringer(c.Tokens) + } + return strings.Join(c.Tokens, " ") +} + +func (c *LogCluster) append(ts model.Time) { + c.Size++ + c.Chunks.Add(ts) +} + +func (c *LogCluster) merge(samples []*logproto.PatternSample) { + c.Size += int(sumSize(samples)) + c.Chunks.merge(samples) +} + +func (c *LogCluster) Iterator(from, through model.Time) iter.Iterator { + return c.Chunks.Iterator(c.String(), from, through) +} + +func (c *LogCluster) Samples() []*logproto.PatternSample { + return c.Chunks.samples() +} + +func (c *LogCluster) Prune(olderThan time.Duration) { + c.Chunks.prune(olderThan) + c.Size = c.Chunks.size() +} + +func sumSize(samples []*logproto.PatternSample) int64 { + var x int64 + for i := range samples { + x += samples[i].Value + } + return x +} diff --git a/pkg/pattern/drain/log_cluster_test.go new file mode 100644 index 0000000000000..6c89cfb14edb1 --- /dev/null +++ b/pkg/pattern/drain/log_cluster_test.go @@ -0,0 +1,4 @@ +package drain + +// func TestSampleIterator(t *testing.T) { +// }
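`LogCluster` above delegates its per-pattern counting to a `Chunks` value, which is defined in `chunk.go` and not part of this hunk. As a rough mental model, matched lines are counted into fixed-width time buckets so a pattern can be returned as (timestamp, count) samples. The sketch below is an assumption-laden illustration of that idea, not the real `Chunks` implementation; the one-minute bucket width and all names are invented.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// bucketCounter counts matched lines into fixed-width time buckets, the
// rough idea behind the Chunks value that LogCluster delegates to above.
type bucketCounter struct {
	width  time.Duration
	counts map[model.Time]int64
}

func (b *bucketCounter) add(ts model.Time) {
	bucket := ts - ts%model.Time(b.width/time.Millisecond) // align to bucket start
	b.counts[bucket]++
}

func main() {
	b := &bucketCounter{width: time.Minute, counts: map[model.Time]int64{}}
	b.add(model.TimeFromUnix(20)) // 20s and 30s land in the same
	b.add(model.TimeFromUnix(30)) // one-minute bucket
	fmt.Println(b.counts) // map[0:2]
}
```

diff --git a/pkg/pattern/flush.go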
b/pkg/pattern/flush.go new file mode 100644 index 0000000000000..d53b486a168c5 --- /dev/null +++ b/pkg/pattern/flush.go @@ -0,0 +1,75 @@ +package pattern + +import ( + "fmt" + "time" + + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/v3/pkg/util" +) + +const retainSampleFor = 3 * time.Hour + +func (i *Ingester) initFlushQueues() { + // i.flushQueuesDone.Add(i.cfg.ConcurrentFlushes) + for j := 0; j < i.cfg.ConcurrentFlushes; j++ { + i.flushQueues[j] = util.NewPriorityQueue(i.metrics.flushQueueLength) + // for now we don't flush, we only prune old samples. + // go i.flushLoop(j) + } +} + +func (i *Ingester) Flush() { + i.flush(true) +} + +func (i *Ingester) flush(mayRemoveStreams bool) { + i.sweepUsers(true, mayRemoveStreams) + + // Close the flush queues, to unblock waiting workers. + for _, flushQueue := range i.flushQueues { + flushQueue.Close() + } + + i.flushQueuesDone.Wait() + level.Debug(i.logger).Log("msg", "flush queues have drained") +} + +type flushOp struct { + from model.Time + userID string + fp model.Fingerprint + immediate bool +} + +func (o *flushOp) Key() string { + return fmt.Sprintf("%s-%s-%v", o.userID, o.fp, o.immediate) +} + +func (o *flushOp) Priority() int64 { + return -int64(o.from) +} + +// sweepUsers periodically schedules series for flushing and garbage collects users with no series +func (i *Ingester) sweepUsers(immediate, mayRemoveStreams bool) { + instances := i.getInstances() + + for _, instance := range instances { + i.sweepInstance(instance, immediate, mayRemoveStreams) + } +} + +func (i *Ingester) sweepInstance(instance *instance, _, mayRemoveStreams bool) { + _ = instance.streams.ForEach(func(s *stream) (bool, error) { + if mayRemoveStreams { + instance.streams.WithLock(func() { + if s.prune(retainSampleFor) { + instance.removeStream(s) + } + }) + } + return true, nil + }) +} diff --git a/pkg/pattern/flush_test.go new file mode 100644 index 0000000000000..4d70eea5c3e10 --- /dev/null +++ b/pkg/pattern/flush_test.go @@ -0,0 +1,98 @@ +package pattern + +import ( + "context" + "math" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" + "github.com/grafana/dskit/user" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/pattern/iter" + + "github.com/grafana/loki/pkg/push" +) + +func TestSweepInstance(t *testing.T) { + ing, err := New(defaultIngesterTestConfig(t), "foo", prometheus.DefaultRegisterer, log.NewNopLogger()) + require.NoError(t, err) + defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck + err = services.StartAndAwaitRunning(context.Background(), ing) + require.NoError(t, err) + + lbs := labels.New(labels.Label{Name: "test", Value: "test"}) + ctx := user.InjectOrgID(context.Background(), "foo") + _, err = ing.Push(ctx, &push.PushRequest{ + Streams: []push.Stream{ + { + Labels: lbs.String(), + Entries: []push.Entry{ + { + Timestamp: time.Unix(20, 0), + Line: "ts=1 msg=hello", + }, + }, + }, + { + Labels: `{test="test",foo="bar"}`, + Entries: []push.Entry{ + { + Timestamp: time.Now(), + Line: "ts=1 msg=foo", + }, + }, + }, + }, + }) + require.NoError(t, err) + + inst, _ := ing.getInstanceByID("foo") + + it, err := inst.Iterator(ctx, 
&logproto.QueryPatternsRequest{ + Query: `{test="test"}`, + Start: time.Unix(0, 0), + End: time.Unix(0, math.MaxInt64), + }) + require.NoError(t, err) + res, err := iter.ReadAll(it) + require.NoError(t, err) + require.Equal(t, 2, len(res.Series)) + ing.sweepUsers(true, true) + it, err = inst.Iterator(ctx, &logproto.QueryPatternsRequest{ + Query: `{test="test"}`, + Start: time.Unix(0, 0), + End: time.Unix(0, math.MaxInt64), + }) + require.NoError(t, err) + res, err = iter.ReadAll(it) + require.NoError(t, err) + require.Equal(t, 1, len(res.Series)) +} + +func defaultIngesterTestConfig(t testing.TB) Config { + kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, ring.GetCodec(), nil, log.NewNopLogger()) + require.NoError(t, err) + + cfg := Config{} + flagext.DefaultValues(&cfg) + cfg.FlushCheckPeriod = 99999 * time.Hour + cfg.ConcurrentFlushes = 1 + cfg.LifecyclerConfig.RingConfig.KVStore.Mock = kvClient + cfg.LifecyclerConfig.NumTokens = 1 + cfg.LifecyclerConfig.ListenPort = 0 + cfg.LifecyclerConfig.Addr = "localhost" + cfg.LifecyclerConfig.ID = "localhost" + cfg.LifecyclerConfig.FinalSleep = 0 + cfg.LifecyclerConfig.MinReadyDuration = 0 + + return cfg +} diff --git a/pkg/pattern/ingester.go b/pkg/pattern/ingester.go new file mode 100644 index 0000000000000..af2e842c28b83 --- /dev/null +++ b/pkg/pattern/ingester.go @@ -0,0 +1,302 @@ +package pattern + +import ( + "context" + "errors" + "flag" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" + "github.com/grafana/dskit/tenant" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/health/grpc_health_v1" + + ring_client "github.com/grafana/dskit/ring/client" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/pattern/clientpool" + "github.com/grafana/loki/v3/pkg/pattern/iter" + "github.com/grafana/loki/v3/pkg/util" + util_log "github.com/grafana/loki/v3/pkg/util/log" +) + +const readBatchSize = 1024 + +type Config struct { + Enabled bool `yaml:"enabled,omitempty" doc:"description=Whether the pattern ingester is enabled."` + LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty" doc:"description=Configures how the lifecycle of the pattern ingester will operate and where it will register for discovery."` + ClientConfig clientpool.Config `yaml:"client_config,omitempty" doc:"description=Configures how the pattern ingester will connect to the ingesters."` + ConcurrentFlushes int `yaml:"concurrent_flushes"` + FlushCheckPeriod time.Duration `yaml:"flush_check_period"` + + // For testing. + factory ring_client.PoolFactory `yaml:"-"` +} + +// RegisterFlags registers pattern ingester related flags. +func (cfg *Config) RegisterFlags(fs *flag.FlagSet) { + cfg.LifecyclerConfig.RegisterFlagsWithPrefix("pattern-ingester.", fs, util_log.Logger) + cfg.ClientConfig.RegisterFlags(fs) + fs.BoolVar(&cfg.Enabled, "pattern-ingester.enabled", false, "Flag to enable or disable the usage of the pattern-ingester component.") + fs.IntVar(&cfg.ConcurrentFlushes, "pattern-ingester.concurrent-flushes", 32, "How many flushes can happen concurrently from each stream.") + fs.DurationVar(&cfg.FlushCheckPeriod, "pattern-ingester.flush-check-period", 30*time.Second, "How often should the ingester see if there are any blocks to flush. The first flush check is delayed by a random time up to 0.8x the flush check period. 
Additionally, there is +/- 20% jitter added to the interval.") +} + +func (cfg *Config) Validate() error { + if cfg.LifecyclerConfig.RingConfig.ReplicationFactor != 1 { + return errors.New("pattern ingester replication factor must be 1") + } + return cfg.LifecyclerConfig.Validate() +} + +type Ingester struct { + services.Service + lifecycler *ring.Lifecycler + + lifecyclerWatcher *services.FailureWatcher + + cfg Config + registerer prometheus.Registerer + logger log.Logger + + instancesMtx sync.RWMutex + instances map[string]*instance + + // One queue per flush thread. Fingerprint is used to + // pick a queue. + flushQueues []*util.PriorityQueue + flushQueuesDone sync.WaitGroup + loopDone sync.WaitGroup + loopQuit chan struct{} + + metrics *ingesterMetrics +} + +func New( + cfg Config, + metricsNamespace string, + registerer prometheus.Registerer, + logger log.Logger, +) (*Ingester, error) { + metrics := newIngesterMetrics(registerer, metricsNamespace) + registerer = prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer) + + i := &Ingester{ + cfg: cfg, + logger: log.With(logger, "component", "pattern-ingester"), + registerer: registerer, + metrics: metrics, + instances: make(map[string]*instance), + flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), + loopQuit: make(chan struct{}), + } + i.Service = services.NewBasicService(i.starting, i.running, i.stopping) + var err error + i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "pattern-ingester", "pattern-ring", true, i.logger, registerer) + if err != nil { + return nil, err + } + + i.lifecyclerWatcher = services.NewFailureWatcher() + i.lifecyclerWatcher.WatchService(i.lifecycler) + + return i, nil +} + +// ServeHTTP implements the pattern ring status page. +func (i *Ingester) ServeHTTP(w http.ResponseWriter, r *http.Request) { + i.lifecycler.ServeHTTP(w, r) +} + +func (i *Ingester) starting(ctx context.Context) error { + // pass new context to lifecycler, so that it doesn't stop automatically when Ingester's service context is done + err := i.lifecycler.StartAsync(context.Background()) + if err != nil { + return err + } + + err = i.lifecycler.AwaitRunning(ctx) + if err != nil { + return err + } + i.initFlushQueues() + // start our loop + i.loopDone.Add(1) + go i.loop() + return nil +} + +func (i *Ingester) running(ctx context.Context) error { + var serviceError error + select { + // wait until service is asked to stop + case <-ctx.Done(): + // stop + case err := <-i.lifecyclerWatcher.Chan(): + serviceError = fmt.Errorf("lifecycler failed: %w", err) + } + + close(i.loopQuit) + i.loopDone.Wait() + return serviceError +} + +func (i *Ingester) stopping(_ error) error { + err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler) + for _, flushQueue := range i.flushQueues { + flushQueue.Close() + } + i.flushQueuesDone.Wait() + return err +}
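The sweep loop below uses two delay mechanisms: a one-off startup delay of up to 0.8x the check period, plus +/- 20% jitter on every tick. Here is a minimal stdlib-only sketch of the startup half; `startupJitter` and `maxFrac` are illustrative names, and the ingester inlines this logic with its own timer and quit channel.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// startupJitter sketches the delay scheme used by loop() below: a one-off
// random delay of up to maxFrac of the flush check period, so replicas
// started together do not sweep in lockstep.
func startupJitter(period time.Duration, maxFrac float64) time.Duration {
	return time.Duration(rand.Int63n(int64(float64(period.Nanoseconds()) * maxFrac)))
}

func main() {
	period := 30 * time.Second
	fmt.Println("initial delay:", startupJitter(period, 0.8)) // uniform in [0s, 24s)
	fmt.Println("per-tick jitter bound:", period/5)           // +/- 6s around each 30s tick
}
```

+ +func (i *Ingester) loop() { + defer i.loopDone.Done() + + // Delay the first flush operation by up to 0.8x the flush time period. + // This will ensure that multiple ingesters started at the same time do not + // flush at the same time. Flushing at the same time can cause concurrently + // writing the same chunk to object storage, which in AWS S3 leads to being + // rate limited.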
+ jitter := time.Duration(rand.Int63n(int64(float64(i.cfg.FlushCheckPeriod.Nanoseconds()) * 0.8))) + initialDelay := time.NewTimer(jitter) + defer initialDelay.Stop() + + level.Info(i.logger).Log("msg", "sleeping for initial delay before starting periodic flushing", "delay", jitter) + + select { + case <-initialDelay.C: + // do nothing and continue with flush loop + case <-i.loopQuit: + // ingester stopped while waiting for initial delay + return + } + + // Add +/- 20% of flush interval as jitter. + // The default flush check period is 30s so max jitter will be 6s. + j := i.cfg.FlushCheckPeriod / 5 + flushTicker := util.NewTickerWithJitter(i.cfg.FlushCheckPeriod, j) + defer flushTicker.Stop() + + for { + select { + case <-flushTicker.C: + i.sweepUsers(false, true) + + case <-i.loopQuit: + return + } + } +} + +// Watch implements grpc_health_v1.HealthCheck. +func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error { + return nil +} + +// CheckReady is used to indicate to k8s when the ingesters are ready for +// the addition or removal of another ingester. Returns 204 when the ingester is +// ready, 500 otherwise. +func (i *Ingester) CheckReady(ctx context.Context) error { + if s := i.State(); s != services.Running && s != services.Stopping { + return fmt.Errorf("ingester not ready: %v", s) + } + return i.lifecycler.CheckReady(ctx) +} + +func (i *Ingester) TransferOut(_ context.Context) error { + // TODO: maybe. + return ring.ErrTransferDisabled +} + +func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logproto.PushResponse, error) { + instanceID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + instance, err := i.GetOrCreateInstance(instanceID) + if err != nil { + return &logproto.PushResponse{}, err + } + return &logproto.PushResponse{}, instance.Push(ctx, req) +} + +func (i *Ingester) Query(req *logproto.QueryPatternsRequest, stream logproto.Pattern_QueryServer) error { + ctx := stream.Context() + instanceID, err := tenant.TenantID(ctx) + if err != nil { + return err + } + instance, err := i.GetOrCreateInstance(instanceID) + if err != nil { + return err + } + iterator, err := instance.Iterator(ctx, req) + if err != nil { + return err + } + defer util.LogErrorWithContext(ctx, "closing iterator", iterator.Close) + return sendPatternSample(ctx, iterator, stream) +} + +func sendPatternSample(ctx context.Context, it iter.Iterator, stream logproto.Pattern_QueryServer) error { + for ctx.Err() == nil { + batch, err := iter.ReadBatch(it, readBatchSize) + if err != nil { + return err + } + if err := stream.Send(batch); err != nil && err != context.Canceled { + return err + } + if len(batch.Series) == 0 { + return nil + } + } + return nil +} + +func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { //nolint:revive + inst, ok := i.getInstanceByID(instanceID) + if ok { + return inst, nil + } + + i.instancesMtx.Lock() + defer i.instancesMtx.Unlock() + inst, ok = i.instances[instanceID] + if !ok { + var err error + inst, err = newInstance(instanceID, i.logger) + if err != nil { + return nil, err + } + i.instances[instanceID] = inst + } + return inst, nil +} + +func (i *Ingester) getInstanceByID(id string) (*instance, bool) { + i.instancesMtx.RLock() + defer i.instancesMtx.RUnlock() + + inst, ok := i.instances[id] + return inst, ok +} + +func (i *Ingester) getInstances() []*instance { + i.instancesMtx.RLock() + defer i.instancesMtx.RUnlock() + + instances := make([]*instance, 0, 
len(i.instances)) + for _, instance := range i.instances { + instances = append(instances, instance) + } + return instances +} diff --git a/pkg/pattern/ingester_querier.go new file mode 100644 index 0000000000000..9aa8cc27c6e3c --- /dev/null +++ b/pkg/pattern/ingester_querier.go @@ -0,0 +1,135 @@ +package pattern + +import ( + "context" + "math" + "net/http" + + "github.com/go-kit/log" + "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/ring" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/logql/syntax" + "github.com/grafana/loki/v3/pkg/pattern/drain" + "github.com/grafana/loki/v3/pkg/pattern/iter" +) + +// TODO(kolesnikovae): parametrise QueryPatternsRequest +const minClusterSize = 30 + +type IngesterQuerier struct { + cfg Config + logger log.Logger + + ringClient *RingClient + + registerer prometheus.Registerer +} + +func NewIngesterQuerier( + cfg Config, + ringClient *RingClient, + metricsNamespace string, + registerer prometheus.Registerer, + logger log.Logger, +) (*IngesterQuerier, error) { + return &IngesterQuerier{ + logger: log.With(logger, "component", "pattern-ingester-querier"), + ringClient: ringClient, + cfg: cfg, + registerer: prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer), + }, nil +} + +func (q *IngesterQuerier) Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) { + _, err := syntax.ParseMatchers(req.Query, true) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + resps, err := q.forAllIngesters(ctx, func(_ context.Context, client logproto.PatternClient) (interface{}, error) { + return client.Query(ctx, req) + }) + if err != nil { + return nil, err + } + iterators := make([]iter.Iterator, len(resps)) + for i := range resps { + iterators[i] = iter.NewQueryClientIterator(resps[i].response.(logproto.Pattern_QueryClient)) + } + // TODO(kolesnikovae): Incorporate with pruning + resp, err := iter.ReadBatch(iter.NewMerge(iterators...), math.MaxInt32) + if err != nil { + return nil, err + } + return prunePatterns(resp, minClusterSize), nil +} + +func prunePatterns(resp *logproto.QueryPatternsResponse, minClusterSize int) *logproto.QueryPatternsResponse { + d := drain.New(drainConfig) + for _, p := range resp.Series { + d.TrainPattern(p.Pattern, p.Samples) + } + + resp.Series = resp.Series[:0] + for _, cluster := range d.Clusters() { + if cluster.Size < minClusterSize { + continue + } + pattern := d.PatternString(cluster) + if pattern == "" { + continue + } + resp.Series = append(resp.Series, &logproto.PatternSeries{ + Pattern: pattern, + Samples: cluster.Samples(), + }) + } + return resp +} + +// forAllIngesters runs f, in parallel, for all ingesters +func (q *IngesterQuerier) forAllIngesters(ctx context.Context, f func(context.Context, logproto.PatternClient) (interface{}, error)) ([]ResponseFromIngesters, error) { + replicationSet, err := q.ringClient.ring.GetReplicationSetForOperation(ring.Read) + if err != nil { + return nil, err + } + + return q.forGivenIngesters(ctx, replicationSet, f) +} + +type ResponseFromIngesters struct { + addr string + response interface{} +}
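`forGivenIngesters` below hands the actual fan-out to dskit's `ring.DoUntilQuorum`; conceptually it is a parallel map over ingester addresses. Here is a stripped-down, stdlib-only sketch of that shape, deliberately omitting quorum tracking and error bookkeeping; all names are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// fanOut calls f once per ingester address in parallel and collects the
// responses, the core of what ring.DoUntilQuorum does for forGivenIngesters.
func fanOut(ctx context.Context, addrs []string, f func(context.Context, string) (string, error)) []string {
	var (
		mtx sync.Mutex
		out []string
		wg  sync.WaitGroup
	)
	for _, addr := range addrs {
		wg.Add(1)
		go func(addr string) {
			defer wg.Done()
			resp, err := f(ctx, addr)
			if err != nil {
				return // a real implementation would count failures toward quorum
			}
			mtx.Lock()
			out = append(out, resp)
			mtx.Unlock()
		}(addr)
	}
	wg.Wait()
	return out
}

func main() {
	resps := fanOut(context.Background(), []string{"ingester-0", "ingester-1"},
		func(_ context.Context, addr string) (string, error) {
			return "patterns from " + addr, nil
		})
	fmt.Println(len(resps)) // 2
}
```

+ +// forGivenIngesters runs f, in parallel, for given ingesters +func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, logproto.PatternClient) (interface{}, error)) ([]ResponseFromIngesters, error) {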
+ cfg := ring.DoUntilQuorumConfig{ + // Nothing here + } + results, err := ring.DoUntilQuorum(ctx, replicationSet, cfg, func(ctx context.Context, ingester *ring.InstanceDesc) (ResponseFromIngesters, error) { + client, err := q.ringClient.pool.GetClientFor(ingester.Addr) + if err != nil { + return ResponseFromIngesters{addr: ingester.Addr}, err + } + + resp, err := f(ctx, client.(logproto.PatternClient)) + if err != nil { + return ResponseFromIngesters{addr: ingester.Addr}, err + } + + return ResponseFromIngesters{ingester.Addr, resp}, nil + }, func(ResponseFromIngesters) { + // Nothing to do + }) + if err != nil { + return nil, err + } + + responses := make([]ResponseFromIngesters, 0, len(results)) + responses = append(responses, results...) + + return responses, err +} diff --git a/pkg/pattern/ingester_querier_test.go b/pkg/pattern/ingester_querier_test.go new file mode 100644 index 0000000000000..d1016b326df73 --- /dev/null +++ b/pkg/pattern/ingester_querier_test.go @@ -0,0 +1,43 @@ +package pattern + +import ( + "bufio" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func Test_prunePatterns(t *testing.T) { + file, err := os.Open("testdata/patterns.txt") + require.NoError(t, err) + defer file.Close() + + resp := new(logproto.QueryPatternsResponse) + scanner := bufio.NewScanner(file) + for scanner.Scan() { + resp.Series = append(resp.Series, &logproto.PatternSeries{ + Pattern: scanner.Text(), + }) + } + require.NoError(t, scanner.Err()) + prunePatterns(resp, 0) + + expectedPatterns := []string{ + `<_> caller=wrapper.go:48 level=info component=distributor msg="sample remote write" eventType=bi <_>`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations <_> handledMessageTime="2024-04-03 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" <_> +0000 UTC, <_>`, + } + + patterns := make([]string, 0, len(resp.Series)) + for _, p := range resp.Series { + patterns = append(patterns, p.Pattern) + } + + require.Equal(t, expectedPatterns, patterns) +} diff --git a/pkg/pattern/ingester_test.go b/pkg/pattern/ingester_test.go new file mode 100644 index 0000000000000..16d5d0f04189f --- /dev/null +++ b/pkg/pattern/ingester_test.go @@ -0,0 +1,63 @@ +package pattern + +import ( + "context" + "math" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/pattern/iter" + + "github.com/grafana/loki/pkg/push" +) + +func TestInstancePushQuery(t *testing.T) { + lbs := labels.New(labels.Label{Name: "test", Value: "test"}) + inst, err := newInstance("foo", log.NewNopLogger()) + require.NoError(t, err) + + err = inst.Push(context.Background(), &push.PushRequest{ + Streams: []push.Stream{ + { + Labels: lbs.String(), + Entries: []push.Entry{ + 
{ + Timestamp: time.Unix(20, 0), + Line: "ts=1 msg=hello", + }, + }, + }, + }, + }) + for i := 0; i <= 30; i++ { + err = inst.Push(context.Background(), &push.PushRequest{ + Streams: []push.Stream{ + { + Labels: lbs.String(), + Entries: []push.Entry{ + { + Timestamp: time.Unix(20, 0), + Line: "foo bar foo bar", + }, + }, + }, + }, + }) + require.NoError(t, err) + } + require.NoError(t, err) + it, err := inst.Iterator(context.Background(), &logproto.QueryPatternsRequest{ + Query: "{test=\"test\"}", + Start: time.Unix(0, 0), + End: time.Unix(0, math.MaxInt64), + }) + require.NoError(t, err) + res, err := iter.ReadAll(it) + require.NoError(t, err) + require.Equal(t, 2, len(res.Series)) +} diff --git a/pkg/pattern/instance.go new file mode 100644 index 0000000000000..4b270a04ca391 --- /dev/null +++ b/pkg/pattern/instance.go @@ -0,0 +1,163 @@ +package pattern + +import ( + "context" + "fmt" + "net/http" + + "github.com/go-kit/log" + "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/multierror" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/v3/pkg/ingester" + "github.com/grafana/loki/v3/pkg/ingester/index" + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/logql/syntax" + "github.com/grafana/loki/v3/pkg/pattern/iter" + "github.com/grafana/loki/v3/pkg/util" +) + +const indexShards = 32 + +// instance is a tenant instance of the pattern ingester. +type instance struct { + instanceID string + buf []byte // buffer used to compute fps. + mapper *ingester.FpMapper // no mutex is needed to use the mapper because reading from streams is lock-free + streams *streamsMap + index *index.BitPrefixInvertedIndex + logger log.Logger +} + +func newInstance(instanceID string, logger log.Logger) (*instance, error) { + index, err := index.NewBitPrefixWithShards(indexShards) + if err != nil { + return nil, err + } + i := &instance{ + buf: make([]byte, 0, 1024), + logger: logger, + instanceID: instanceID, + streams: newStreamsMap(), + index: index, + } + i.mapper = ingester.NewFPMapper(i.getLabelsFromFingerprint) + return i, nil +} + +// Push pushes the log entries in the given PushRequest to the appropriate streams. +// It returns an error if any error occurs during the push operation. +func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { + appendErr := multierror.New() + + for _, reqStream := range req.Streams { + s, _, err := i.streams.LoadOrStoreNew(reqStream.Labels, + func() (*stream, error) { + // add stream + return i.createStream(ctx, reqStream) + }, nil) + if err != nil { + appendErr.Add(err) + continue + } + err = s.Push(ctx, reqStream.Entries) + if err != nil { + appendErr.Add(err) + continue + } + } + return appendErr.Err() +}
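`Push` above creates streams lazily, keyed by the labels string, via `streamsMap.LoadOrStoreNew`, so concurrent pushes for one label set share a single pattern tree. A simplified sketch of that load-or-create contract using a plain mutex (the real `streamsMap` keeps the read path lock-free; `streamSet` and `fakeStream` are invented stand-ins):

```go
package main

import (
	"fmt"
	"sync"
)

// streamSet sketches the load-or-create contract Push relies on.
type streamSet struct {
	mtx     sync.Mutex
	streams map[string]*fakeStream
}

type fakeStream struct{ labels string }

func (s *streamSet) loadOrCreate(labels string) *fakeStream {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if st, ok := s.streams[labels]; ok {
		return st // the stream already exists
	}
	st := &fakeStream{labels: labels}
	s.streams[labels] = st
	return st
}

func main() {
	set := &streamSet{streams: map[string]*fakeStream{}}
	a := set.loadOrCreate(`{test="test"}`)
	b := set.loadOrCreate(`{test="test"}`)
	fmt.Println(a == b) // true: one stream per label set
}
```

+ +// Iterator returns an iterator of pattern samples matching the given query patterns request.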
+func (i *instance) Iterator(ctx context.Context, req *logproto.QueryPatternsRequest) (iter.Iterator, error) { + matchers, err := syntax.ParseMatchers(req.Query, true) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + from, through := util.RoundToMilliseconds(req.Start, req.End) + + var iters []iter.Iterator + err = i.forMatchingStreams(matchers, func(s *stream) error { + iter, err := s.Iterator(ctx, from, through) + if err != nil { + return err + } + iters = append(iters, iter) + return nil + }) + if err != nil { + return nil, err + } + return iter.NewMerge(iters...), nil +} + +// forMatchingStreams will execute a function for each stream that matches the given matchers. +func (i *instance) forMatchingStreams( + matchers []*labels.Matcher, + fn func(*stream) error, +) error { + filters, matchers := util.SplitFiltersAndMatchers(matchers) + ids, err := i.index.Lookup(matchers, nil) + if err != nil { + return err + } + +outer: + for _, streamID := range ids { + stream, ok := i.streams.LoadByFP(streamID) + if !ok { + // If a stream is missing here, it has already been pruned + // and is no longer queryable + continue + } + for _, filter := range filters { + if !filter.Matches(stream.labels.Get(filter.Name)) { + continue outer + } + } + err := fn(stream) + if err != nil { + return err + } + } + return nil +} + +func (i *instance) createStream(_ context.Context, pushReqStream logproto.Stream) (*stream, error) { + labels, err := syntax.ParseLabels(pushReqStream.Labels) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + fp := i.getHashForLabels(labels) + sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(labels), fp) + s, err := newStream(fp, sortedLabels) + if err != nil { + return nil, fmt.Errorf("failed to create stream: %w", err) + } + return s, nil +} + +func (i *instance) getHashForLabels(ls labels.Labels) model.Fingerprint { + var fp uint64 + fp, i.buf = ls.HashWithoutLabels(i.buf, []string(nil)...) + return i.mapper.MapFP(model.Fingerprint(fp), ls) +} + +// Return labels associated with given fingerprint. Used by fingerprint mapper. +func (i *instance) getLabelsFromFingerprint(fp model.Fingerprint) labels.Labels { + s, ok := i.streams.LoadByFP(fp) + if !ok { + return nil + } + return s.labels +}
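`forMatchingStreams` above serves exact matchers from the inverted index and applies the remaining matchers as post-filters against each candidate stream's label values. A minimal sketch of that post-filtering step, using only the upstream `labels` package already imported by this file:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// postFilter keeps only the streams whose label values satisfy every
// filter matcher, mirroring the inner loop of forMatchingStreams.
func postFilter(streams []labels.Labels, filters []*labels.Matcher) []labels.Labels {
	var out []labels.Labels
outer:
	for _, ls := range streams {
		for _, f := range filters {
			if !f.Matches(ls.Get(f.Name)) {
				continue outer
			}
		}
		out = append(out, ls)
	}
	return out
}

func main() {
	streams := []labels.Labels{
		labels.FromStrings("test", "test", "foo", "bar"),
		labels.FromStrings("test", "test", "foo", "baz"),
	}
	re := labels.MustNewMatcher(labels.MatchRegexp, "foo", "ba[rz]")
	neg := labels.MustNewMatcher(labels.MatchNotEqual, "foo", "baz")
	fmt.Println(len(postFilter(streams, []*labels.Matcher{re, neg}))) // 1
}
```

+ +// removeStream removes a stream from the instance.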
+func (i *instance) removeStream(s *stream) { + if i.streams.Delete(s) { + i.index.Delete(s.labels, s.fp) + } +} diff --git a/pkg/pattern/iter/batch.go b/pkg/pattern/iter/batch.go new file mode 100644 index 0000000000000..80ad1197c80a9 --- /dev/null +++ b/pkg/pattern/iter/batch.go @@ -0,0 +1,34 @@ +package iter + +import ( + "math" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func ReadBatch(it Iterator, batchSize int) (*logproto.QueryPatternsResponse, error) { + var ( + series = map[string][]*logproto.PatternSample{} + respSize int + ) + + for ; respSize < batchSize && it.Next(); respSize++ { + pattern := it.Pattern() + sample := it.At() + series[pattern] = append(series[pattern], &sample) + } + result := logproto.QueryPatternsResponse{ + Series: make([]*logproto.PatternSeries, 0, len(series)), + } + for pattern, samples := range series { + result.Series = append(result.Series, &logproto.PatternSeries{ + Pattern: pattern, + Samples: samples, + }) + } + return &result, it.Error() +} + +func ReadAll(it Iterator) (*logproto.QueryPatternsResponse, error) { + return ReadBatch(it, math.MaxInt32) +} diff --git a/pkg/pattern/iter/batch_test.go b/pkg/pattern/iter/batch_test.go new file mode 100644 index 0000000000000..7f544e23f417d --- /dev/null +++ b/pkg/pattern/iter/batch_test.go @@ -0,0 +1,73 @@ +package iter + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func TestReadBatch(t *testing.T) { + tests := []struct { + name string + pattern string + samples []logproto.PatternSample + batchSize int + expected *logproto.QueryPatternsResponse + }{ + { + name: "ReadBatch empty iterator", + pattern: "foo", + samples: []logproto.PatternSample{}, + batchSize: 2, + expected: &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{}, + }, + }, + { + name: "ReadBatch less than batchSize", + pattern: "foo", + samples: []logproto.PatternSample{{Timestamp: 10, Value: 2}, {Timestamp: 20, Value: 4}, {Timestamp: 30, Value: 6}}, + batchSize: 2, + expected: &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{ + { + Pattern: "foo", + Samples: []*logproto.PatternSample{ + {Timestamp: 10, Value: 2}, + {Timestamp: 20, Value: 4}, + }, + }, + }, + }, + }, + { + name: "ReadBatch more than batchSize", + pattern: "foo", + samples: []logproto.PatternSample{{Timestamp: 10, Value: 2}, {Timestamp: 20, Value: 4}, {Timestamp: 30, Value: 6}}, + batchSize: 4, + expected: &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{ + { + Pattern: "foo", + Samples: []*logproto.PatternSample{ + {Timestamp: 10, Value: 2}, + {Timestamp: 20, Value: 4}, + {Timestamp: 30, Value: 6}, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + it := NewSlice(tt.pattern, tt.samples) + got, err := ReadBatch(it, tt.batchSize) + require.NoError(t, err) + require.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/pattern/iter/iterator.go b/pkg/pattern/iter/iterator.go new file mode 100644 index 0000000000000..5a277c0f27349 --- /dev/null +++ b/pkg/pattern/iter/iterator.go @@ -0,0 +1,133 @@ +package iter + +import ( + "github.com/grafana/loki/v3/pkg/logproto" +) + +var Empty Iterator = &emptyIterator{} + +type Iterator interface { + Next() bool + + Pattern() string + At() logproto.PatternSample + + Error() error + Close() error +} + +func NewSlice(pattern string, s []logproto.PatternSample) Iterator { + return &sliceIterator{ + values: s, + pattern: pattern, + i: -1, + } +} + +type 
sliceIterator struct { + i int + pattern string + values []logproto.PatternSample +} + +func (s *sliceIterator) Next() bool { + s.i++ + return s.i < len(s.values) +} + +func (s *sliceIterator) Pattern() string { + return s.pattern +} + +func (s *sliceIterator) At() logproto.PatternSample { + return s.values[s.i] +} + +func (s *sliceIterator) Error() error { + return nil +} + +func (s *sliceIterator) Close() error { + return nil +} + +type emptyIterator struct { + pattern string +} + +func (e *emptyIterator) Next() bool { + return false +} + +func (e *emptyIterator) Pattern() string { + return e.pattern +} + +func (e *emptyIterator) At() logproto.PatternSample { + return logproto.PatternSample{} +} + +func (e *emptyIterator) Error() error { + return nil +} + +func (e *emptyIterator) Close() error { + return nil +} + +type nonOverlappingIterator struct { + iterators []Iterator + curr Iterator + pattern string +} + +// NewNonOverlappingIterator gives a chained iterator over a list of iterators. +func NewNonOverlappingIterator(pattern string, iterators []Iterator) Iterator { + return &nonOverlappingIterator{ + iterators: iterators, + pattern: pattern, + } +} + +func (i *nonOverlappingIterator) Next() bool { + for i.curr == nil || !i.curr.Next() { + if len(i.iterators) == 0 { + if i.curr != nil { + i.curr.Close() + } + return false + } + if i.curr != nil { + i.curr.Close() + } + i.curr, i.iterators = i.iterators[0], i.iterators[1:] + } + + return true +} + +func (i *nonOverlappingIterator) At() logproto.PatternSample { + return i.curr.At() +} + +func (i *nonOverlappingIterator) Pattern() string { + return i.pattern +} + +func (i *nonOverlappingIterator) Error() error { + if i.curr == nil { + return nil + } + return i.curr.Error() +} + +func (i *nonOverlappingIterator) Close() error { + if i.curr != nil { + i.curr.Close() + } + for _, iter := range i.iterators { + iter.Close() + } + i.iterators = nil + return nil +} diff --git a/pkg/pattern/iter/iterator_test.go b/pkg/pattern/iter/iterator_test.go new file mode 100644 index 0000000000000..b327800575b55 --- /dev/null +++ b/pkg/pattern/iter/iterator_test.go @@ -0,0 +1,72 @@ +package iter + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func TestSliceIterator(t *testing.T) { + tests := []struct { + name string + pattern string + samples []logproto.PatternSample + want []patternSample + }{ + { + name: "1 samples", + pattern: "foo", + samples: []logproto.PatternSample{ + {Timestamp: 10, Value: 2}, + }, + want: []patternSample{ + {"foo", logproto.PatternSample{Timestamp: 10, Value: 2}}, + }, + }, + { + name: "3 samples", + pattern: "foo", + samples: []logproto.PatternSample{ + {Timestamp: 10, Value: 2}, + {Timestamp: 20, Value: 4}, + {Timestamp: 30, Value: 6}, + }, + want: []patternSample{ + {"foo", logproto.PatternSample{Timestamp: 10, Value: 2}}, + {"foo", logproto.PatternSample{Timestamp: 20, Value: 4}}, + {"foo", logproto.PatternSample{Timestamp: 30, Value: 6}}, + }, + }, + { + name: "empty", + pattern: "foo", + samples: nil, + want: nil, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + got := slice(NewSlice(tt.pattern, tt.samples)) + require.Equal(t, tt.want, got) + }) + } +} + +func slice(it Iterator) []patternSample { + var samples []patternSample + defer it.Close() + for it.Next() { + samples = append(samples, patternSample{ + pattern: it.Pattern(), + sample: it.At(), + }) + } + if it.Error() != nil { + panic(it.Error()) + } + return 
samples +} diff --git a/pkg/pattern/iter/merge.go b/pkg/pattern/iter/merge.go new file mode 100644 index 0000000000000..3b0e07e33b8a8 --- /dev/null +++ b/pkg/pattern/iter/merge.go @@ -0,0 +1,88 @@ +package iter + +import ( + "math" + + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/util/loser" +) + +type mergeIterator struct { + tree *loser.Tree[patternSample, Iterator] + current patternSample + initialized bool + done bool +} + +type patternSample struct { + pattern string + sample logproto.PatternSample +} + +var max = patternSample{ + pattern: "", + sample: logproto.PatternSample{Timestamp: math.MaxInt64}, +} + +func NewMerge(iters ...Iterator) Iterator { + tree := loser.New(iters, max, func(s Iterator) patternSample { + return patternSample{ + pattern: s.Pattern(), + sample: s.At(), + } + }, func(e1, e2 patternSample) bool { + if e1.sample.Timestamp == e2.sample.Timestamp { + return e1.pattern < e2.pattern + } + return e1.sample.Timestamp < e2.sample.Timestamp + }, func(s Iterator) { + s.Close() + }) + return &mergeIterator{ + tree: tree, + } +} + +func (m *mergeIterator) Next() bool { + if m.done { + return false + } + + if !m.initialized { + m.initialized = true + if !m.tree.Next() { + m.done = true + return false + } + } + + m.current.pattern = m.tree.Winner().Pattern() + m.current.sample = m.tree.Winner().At() + + for m.tree.Next() { + if m.current.sample.Timestamp != m.tree.Winner().At().Timestamp || m.current.pattern != m.tree.Winner().Pattern() { + return true + } + m.current.sample.Value += m.tree.Winner().At().Value + } + + m.done = true + return true +} + +func (m *mergeIterator) Pattern() string { + return m.current.pattern +} + +func (m *mergeIterator) At() logproto.PatternSample { + return m.current.sample +} + +func (m *mergeIterator) Error() error { + return nil +} + +func (m *mergeIterator) Close() error { + m.tree.Close() + return nil +} diff --git a/pkg/pattern/iter/merge_test.go b/pkg/pattern/iter/merge_test.go new file mode 100644 index 0000000000000..a1d643a5a01c1 --- /dev/null +++ b/pkg/pattern/iter/merge_test.go @@ -0,0 +1,78 @@ +package iter + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +func TestMerge(t *testing.T) { + tests := []struct { + name string + iterators []Iterator + expected []patternSample + }{ + { + name: "Empty iterators", + iterators: []Iterator{}, + expected: nil, + }, + { + name: "Merge single iterator", + iterators: []Iterator{ + NewSlice("a", []logproto.PatternSample{ + {Timestamp: 10, Value: 2}, {Timestamp: 20, Value: 4}, {Timestamp: 30, Value: 6}, + }), + }, + expected: []patternSample{ + {"a", logproto.PatternSample{Timestamp: 10, Value: 2}}, + {"a", logproto.PatternSample{Timestamp: 20, Value: 4}}, + {"a", logproto.PatternSample{Timestamp: 30, Value: 6}}, + }, + }, + { + name: "Merge multiple iterators", + iterators: []Iterator{ + NewSlice("a", []logproto.PatternSample{{Timestamp: 10, Value: 2}, {Timestamp: 30, Value: 6}}), + NewSlice("b", []logproto.PatternSample{{Timestamp: 20, Value: 4}, {Timestamp: 40, Value: 8}}), + }, + expected: []patternSample{ + {"a", logproto.PatternSample{Timestamp: 10, Value: 2}}, + {"b", logproto.PatternSample{Timestamp: 20, Value: 4}}, + {"a", logproto.PatternSample{Timestamp: 30, Value: 6}}, + {"b", logproto.PatternSample{Timestamp: 40, Value: 8}}, + }, + }, + { + name: "Merge multiple iterators with similar samples", + iterators: []Iterator{ + NewSlice("a", []logproto.PatternSample{{Timestamp: 10, Value: 2}, 
{Timestamp: 30, Value: 6}}), + NewSlice("a", []logproto.PatternSample{{Timestamp: 10, Value: 2}, {Timestamp: 30, Value: 6}}), + NewSlice("b", []logproto.PatternSample{{Timestamp: 20, Value: 4}, {Timestamp: 40, Value: 8}}), + }, + expected: []patternSample{ + {"a", logproto.PatternSample{Timestamp: 10, Value: 4}}, + {"b", logproto.PatternSample{Timestamp: 20, Value: 4}}, + {"a", logproto.PatternSample{Timestamp: 30, Value: 12}}, + {"b", logproto.PatternSample{Timestamp: 40, Value: 8}}, + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + it := NewMerge(tt.iterators...) + defer it.Close() + + var result []patternSample + for it.Next() { + result = append(result, patternSample{it.Pattern(), it.At()}) + } + + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/pattern/iter/query_client.go b/pkg/pattern/iter/query_client.go new file mode 100644 index 0000000000000..f6c5c4fa97744 --- /dev/null +++ b/pkg/pattern/iter/query_client.go @@ -0,0 +1,64 @@ +package iter + +import ( + "io" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +type queryClientIterator struct { + client logproto.Pattern_QueryClient + err error + curr Iterator +} + +// NewQueryClientIterator returns an iterator over a QueryClient. +func NewQueryClientIterator(client logproto.Pattern_QueryClient) Iterator { + return &queryClientIterator{ + client: client, + } +} + +func (i *queryClientIterator) Next() bool { + for i.curr == nil || !i.curr.Next() { + batch, err := i.client.Recv() + if err == io.EOF { + return false + } else if err != nil { + i.err = err + return false + } + i.curr = NewQueryResponseIterator(batch) + } + + return true +} + +func (i *queryClientIterator) Pattern() string { + return i.curr.Pattern() +} + +func (i *queryClientIterator) At() logproto.PatternSample { + return i.curr.At() +} + +func (i *queryClientIterator) Error() error { + return i.err +} + +func (i *queryClientIterator) Close() error { + return i.client.CloseSend() +} + +func NewQueryResponseIterator(resp *logproto.QueryPatternsResponse) Iterator { + iters := make([]Iterator, len(resp.Series)) + for i, s := range resp.Series { + // todo we should avoid this conversion + samples := make([]logproto.PatternSample, len(s.Samples)) + for j, sample := range s.Samples { + samples[j] = *sample + } + iters[i] = NewSlice(s.Pattern, samples) + } + return NewMerge(iters...) 
+} diff --git a/pkg/pattern/metrics.go b/pkg/pattern/metrics.go new file mode 100644 index 0000000000000..e4a9c146c36f6 --- /dev/null +++ b/pkg/pattern/metrics.go @@ -0,0 +1,21 @@ +package pattern + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type ingesterMetrics struct { + flushQueueLength prometheus.Gauge +} + +func newIngesterMetrics(r prometheus.Registerer, metricsNamespace string) *ingesterMetrics { + return &ingesterMetrics{ + flushQueueLength: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Subsystem: "pattern_ingester", + Name: "flush_queue_length", + Help: "The total number of series pending in the flush queue.", + }), + } +} diff --git a/pkg/pattern/ring_client.go b/pkg/pattern/ring_client.go new file mode 100644 index 0000000000000..3ceaf481a3b9b --- /dev/null +++ b/pkg/pattern/ring_client.go @@ -0,0 +1,77 @@ +package pattern + +import ( + "context" + "fmt" + + "github.com/go-kit/log" + "github.com/grafana/dskit/ring" + ring_client "github.com/grafana/dskit/ring/client" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/v3/pkg/pattern/clientpool" +) + +type RingClient struct { + cfg Config + logger log.Logger + + services.Service + subservices *services.Manager + subservicesWatcher *services.FailureWatcher + ring *ring.Ring + pool *ring_client.Pool +} + +func NewRingClient( + cfg Config, + metricsNamespace string, + registerer prometheus.Registerer, + logger log.Logger, +) (*RingClient, error) { + var err error + registerer = prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer) + ringClient := &RingClient{ + logger: log.With(logger, "component", "pattern-ring-client"), + cfg: cfg, + } + ringClient.ring, err = ring.New(cfg.LifecyclerConfig.RingConfig, "pattern-ingester", "pattern-ring", ringClient.logger, registerer) + if err != nil { + return nil, err + } + factory := cfg.factory + if factory == nil { + factory = ring_client.PoolAddrFunc(func(addr string) (ring_client.PoolClient, error) { + return clientpool.NewClient(cfg.ClientConfig, addr) + }) + } + ringClient.pool = clientpool.NewPool("pattern-ingester", cfg.ClientConfig.PoolConfig, ringClient.ring, factory, logger, metricsNamespace) + + ringClient.subservices, err = services.NewManager(ringClient.pool, ringClient.ring) + if err != nil { + return nil, fmt.Errorf("services manager: %w", err) + } + ringClient.subservicesWatcher = services.NewFailureWatcher() + ringClient.subservicesWatcher.WatchManager(ringClient.subservices) + ringClient.Service = services.NewBasicService(ringClient.starting, ringClient.running, ringClient.stopping) + + return ringClient, nil +} + +func (q *RingClient) starting(ctx context.Context) error { + return services.StartManagerAndAwaitHealthy(ctx, q.subservices) +} + +func (q *RingClient) running(ctx context.Context) error { + select { + case <-ctx.Done(): + return nil + case err := <-q.subservicesWatcher.Chan(): + return fmt.Errorf("pattern tee subservices failed: %w", err) + } +} + +func (q *RingClient) stopping(_ error) error { + return services.StopManagerAndAwaitStopped(context.Background(), q.subservices) +} diff --git a/pkg/pattern/stream.go b/pkg/pattern/stream.go new file mode 100644 index 0000000000000..2327d92fb329f --- /dev/null +++ b/pkg/pattern/stream.go @@ -0,0 +1,130 @@ +package pattern + +import ( + "context" + "sync" + "time" + + "github.com/grafana/loki/v3/pkg/logproto" + 
"github.com/grafana/loki/v3/pkg/pattern/drain" + "github.com/grafana/loki/v3/pkg/pattern/iter" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" +) + +// TODO(kolesnikovae): +// +// This is crucial for Drain to ensure that the first LogClusterDepth tokens +// are constant (see https://jiemingzhu.github.io/pub/pjhe_icws2017.pdf). +// We should remove any variables such as timestamps, IDs, IPs, counters, etc. +// from these tokens. +// +// Moreover, Drain is not designed for structured logs. Therefore, we should +// handle logfmt (and, probably, JSON) logs in a special way: +// +// The parse tree should have a fixed length, and the depth should be +// determined by the number of fields in the logfmt message. +// A parsing tree should be maintained for each unique field set. + +var drainConfig = &drain.Config{ + // At training, if at the depth of LogClusterDepth there is a cluster with + // similarity coefficient greater that SimTh, then the log message is added + // to that cluster. Otherwise, a new cluster is created. + // + // LogClusterDepth should be equal to the number of constant tokens from + // the beginning of the message that likely determine the message contents. + // + // > In this step, Drain traverses from a 1-st layer node, which + // > is searched in step 2, to a leaf node. This step is based on + // > the assumption that tokens in the beginning positions of a log + // > message are more likely to be constants. Specifically, Drain + // > selects the next internal node by the tokens in the beginning + // > positions of the log message + LogClusterDepth: 8, + // SimTh is basically a ratio of matching/total in the cluster. + // Cluster tokens: "foo <*> bar fred" + // Log line: "foo bar baz qux" + // * * * x + // Similarity of these sequences is 0.75 (the distance) + // Both SimTh and MaxClusterDepth impact branching factor: the greater + // MaxClusterDepth and SimTh, the less the chance that there will be + // "similar" clusters, but the greater the footprint. + SimTh: 0.3, + MaxChildren: 100, + ParamString: "<_>", + MaxClusters: 300, +} + +type stream struct { + fp model.Fingerprint + labels labels.Labels + labelsString string + labelHash uint64 + patterns *drain.Drain + mtx sync.Mutex + + lastTs int64 +} + +func newStream( + fp model.Fingerprint, + labels labels.Labels, +) (*stream, error) { + return &stream{ + fp: fp, + labels: labels, + labelsString: labels.String(), + labelHash: labels.Hash(), + patterns: drain.New(drainConfig), + }, nil +} + +func (s *stream) Push( + _ context.Context, + entries []logproto.Entry, +) error { + s.mtx.Lock() + defer s.mtx.Unlock() + + for _, entry := range entries { + if entry.Timestamp.UnixNano() < s.lastTs { + continue + } + s.lastTs = entry.Timestamp.UnixNano() + s.patterns.Train(entry.Line, entry.Timestamp.UnixNano()) + } + return nil +} + +func (s *stream) Iterator(_ context.Context, from, through model.Time) (iter.Iterator, error) { + // todo we should improve locking. 
+ s.mtx.Lock() + defer s.mtx.Unlock() + + clusters := s.patterns.Clusters() + iters := make([]iter.Iterator, 0, len(clusters)) + + for _, cluster := range clusters { + if cluster.String() == "" { + continue + } + iters = append(iters, cluster.Iterator(from, through)) + } + return iter.NewMerge(iters...), nil +} + +func (s *stream) prune(olderThan time.Duration) bool { + s.mtx.Lock() + defer s.mtx.Unlock() + + clusters := s.patterns.Clusters() + for _, cluster := range clusters { + cluster.Prune(olderThan) + if cluster.Size == 0 { + s.patterns.Delete(cluster) + } + } + + return len(s.patterns.Clusters()) == 0 +} diff --git a/pkg/pattern/stream_test.go b/pkg/pattern/stream_test.go new file mode 100644 index 0000000000000..6d3b0010d3254 --- /dev/null +++ b/pkg/pattern/stream_test.go @@ -0,0 +1,77 @@ +package pattern + +import ( + "context" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/pattern/iter" + + "github.com/grafana/loki/pkg/push" +) + +func TestAddStream(t *testing.T) { + lbs := labels.New(labels.Label{Name: "test", Value: "test"}) + stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs) + require.NoError(t, err) + + err = stream.Push(context.Background(), []push.Entry{ + { + Timestamp: time.Unix(20, 0), + Line: "ts=1 msg=hello", + }, + { + Timestamp: time.Unix(20, 0), + Line: "ts=2 msg=hello", + }, + { + Timestamp: time.Unix(10, 0), + Line: "ts=3 msg=hello", // this should be ignored because it's older than the last entry + }, + }) + require.NoError(t, err) + it, err := stream.Iterator(context.Background(), model.Earliest, model.Latest) + require.NoError(t, err) + res, err := iter.ReadAll(it) + require.NoError(t, err) + require.Equal(t, 1, len(res.Series)) + require.Equal(t, int64(2), res.Series[0].Samples[0].Value) +} + +func TestPruneStream(t *testing.T) { + lbs := labels.New(labels.Label{Name: "test", Value: "test"}) + stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs) + require.NoError(t, err) + + err = stream.Push(context.Background(), []push.Entry{ + { + Timestamp: time.Unix(20, 0), + Line: "ts=1 msg=hello", + }, + { + Timestamp: time.Unix(20, 0), + Line: "ts=2 msg=hello", + }, + }) + require.NoError(t, err) + require.Equal(t, true, stream.prune(time.Hour)) + + err = stream.Push(context.Background(), []push.Entry{ + { + Timestamp: time.Now(), + Line: "ts=1 msg=hello", + }, + }) + require.NoError(t, err) + require.Equal(t, false, stream.prune(time.Hour)) + it, err := stream.Iterator(context.Background(), model.Earliest, model.Latest) + require.NoError(t, err) + res, err := iter.ReadAll(it) + require.NoError(t, err) + require.Equal(t, 1, len(res.Series)) + require.Equal(t, int64(1), res.Series[0].Samples[0].Value) +} diff --git a/pkg/pattern/streams_map.go b/pkg/pattern/streams_map.go new file mode 100644 index 0000000000000..b00dc86ec9547 --- /dev/null +++ b/pkg/pattern/streams_map.go @@ -0,0 +1,149 @@ +package pattern + +import ( + "sync" + + "github.com/prometheus/common/model" + "go.uber.org/atomic" +) + +type streamsMap struct { + consistencyMtx sync.RWMutex // Keep read/write consistency between other fields + streams *sync.Map // map[string]*stream + streamsByFP *sync.Map // map[model.Fingerprint]*stream + streamsCounter *atomic.Int64 +} + +func newStreamsMap() *streamsMap { + return &streamsMap{ + consistencyMtx: sync.RWMutex{}, + streams: &sync.Map{}, + streamsByFP: &sync.Map{}, + streamsCounter: atomic.NewInt64(0), 
+ } +} + +// Load is lock-free. If usage of the stream is consistency sensitive, must be called inside WithRLock at least +func (m *streamsMap) Load(key string) (*stream, bool) { + return m.load(m.streams, key) +} + +// LoadByFP is lock-free. If usage of the stream is consistency sensitive, must be called inside WithRLock at least +func (m *streamsMap) LoadByFP(fp model.Fingerprint) (*stream, bool) { + return m.load(m.streamsByFP, fp) +} + +// Store must be called inside WithLock +func (m *streamsMap) Store(key string, s *stream) { + m.store(key, s) +} + +// StoreByFP must be called inside WithLock +func (m *streamsMap) StoreByFP(fp model.Fingerprint, s *stream) { + m.store(fp, s) +} + +// Delete must be called inside WithLock +func (m *streamsMap) Delete(s *stream) bool { + _, loaded := m.streams.LoadAndDelete(s.labelsString) + if loaded { + m.streamsByFP.Delete(s.fp) + m.streamsCounter.Dec() + return true + } + return false +} + +// LoadOrStoreNew already has lock inside, do NOT call inside WithLock or WithRLock +func (m *streamsMap) LoadOrStoreNew(key string, newStreamFn func() (*stream, error), postLoadFn func(*stream) error) (*stream, bool, error) { + return m.loadOrStoreNew(m.streams, key, newStreamFn, postLoadFn) +} + +// LoadOrStoreNewByFP already has lock inside, do NOT call inside WithLock or WithRLock +func (m *streamsMap) LoadOrStoreNewByFP(fp model.Fingerprint, newStreamFn func() (*stream, error), postLoadFn func(*stream) error) (*stream, bool, error) { + return m.loadOrStoreNew(m.streamsByFP, fp, newStreamFn, postLoadFn) +} + +// WithLock is a helper function to execute write operations +func (m *streamsMap) WithLock(fn func()) { + m.consistencyMtx.Lock() + defer m.consistencyMtx.Unlock() + fn() +} + +// WithRLock is a helper function to execute consistency sensitive read operations. +// Generally, if a stream loaded from streamsMap will have its chunkMtx locked, chunkMtx.Lock is supposed to be called +// within this function. +func (m *streamsMap) WithRLock(fn func()) { + m.consistencyMtx.RLock() + defer m.consistencyMtx.RUnlock() + fn() +} + +func (m *streamsMap) ForEach(fn func(s *stream) (bool, error)) error { + var c bool + var err error + m.streams.Range(func(_, value interface{}) bool { + c, err = fn(value.(*stream)) + return c + }) + return err +} + +func (m *streamsMap) Len() int { + return int(m.streamsCounter.Load()) +} + +func (m *streamsMap) load(mp *sync.Map, key interface{}) (*stream, bool) { + if v, ok := mp.Load(key); ok { + return v.(*stream), true + } + return nil, false +} + +func (m *streamsMap) store(key interface{}, s *stream) { + if labelsString, ok := key.(string); ok { + m.streams.Store(labelsString, s) + } else { + m.streams.Store(s.labelsString, s) + } + m.streamsByFP.Store(s.fp, s) + m.streamsCounter.Inc() +} + +// newStreamFn: Called if not loaded, with consistencyMtx locked. Must not be nil +// postLoadFn: Called if loaded, with consistencyMtx read-locked at least. 
+func (m *streamsMap) loadOrStoreNew(mp *sync.Map, key interface{}, newStreamFn func() (*stream, error), postLoadFn func(*stream) error) (*stream, bool, error) {
+	var s *stream
+	var loaded bool
+	var err error
+	m.WithRLock(func() {
+		if s, loaded = m.load(mp, key); loaded {
+			if postLoadFn != nil {
+				err = postLoadFn(s)
+			}
+		}
+	})
+
+	if loaded {
+		return s, true, err
+	}
+
+	m.WithLock(func() {
+		// Double check
+		if s, loaded = m.load(mp, key); loaded {
+			if postLoadFn != nil {
+				err = postLoadFn(s)
+			}
+			return
+		}
+
+		s, err = newStreamFn()
+		if err != nil {
+			return
+		}
+		m.store(key, s)
+	})
+
+	return s, loaded, err
+}
diff --git a/pkg/pattern/tee.go b/pkg/pattern/tee.go
new file mode 100644
index 0000000000000..70fb37e1b6929
--- /dev/null
+++ b/pkg/pattern/tee.go
@@ -0,0 +1,88 @@
+package pattern
+
+import (
+	"context"
+	"errors"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/grafana/dskit/ring"
+	"github.com/grafana/dskit/user"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	"github.com/grafana/loki/v3/pkg/distributor"
+	"github.com/grafana/loki/v3/pkg/logproto"
+)
+
+type Tee struct {
+	cfg        Config
+	logger     log.Logger
+	ringClient *RingClient
+
+	ingesterAppends *prometheus.CounterVec
+}
+
+func NewTee(
+	cfg Config,
+	ringClient *RingClient,
+	metricsNamespace string,
+	registerer prometheus.Registerer,
+	logger log.Logger,
+) (*Tee, error) {
+	registerer = prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer)
+
+	t := &Tee{
+		logger: log.With(logger, "component", "pattern-tee"),
+		ingesterAppends: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
+			Name: "pattern_ingester_appends_total",
+			Help: "The total number of batch appends sent to pattern ingesters.",
+		}, []string{"ingester", "status"}),
+		cfg:        cfg,
+		ringClient: ringClient,
+	}
+
+	return t, nil
+}
+
+// Duplicate implements distributor.Tee, which is used to tee distributor
+// requests to pattern ingesters.
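+// Streams are forwarded on a best-effort basis: each stream is pushed to a
+// single pattern ingester in its own goroutine, and failures are logged
+// rather than returned, so teeing never blocks or fails the main write path.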
+func (t *Tee) Duplicate(tenant string, streams []distributor.KeyedStream) { + for idx := range streams { + go func(stream distributor.KeyedStream) { + if err := t.sendStream(tenant, stream); err != nil { + level.Error(t.logger).Log("msg", "failed to send stream to pattern ingester", "err", err) + } + }(streams[idx]) + } +} + +func (t *Tee) sendStream(tenant string, stream distributor.KeyedStream) error { + var descs [1]ring.InstanceDesc + replicationSet, err := t.ringClient.ring.Get(stream.HashKey, ring.WriteNoExtend, descs[:0], nil, nil) + if err != nil { + return err + } + if replicationSet.Instances == nil { + return errors.New("no instances found") + } + addr := replicationSet.Instances[0].Addr + client, err := t.ringClient.pool.GetClientFor(addr) + if err != nil { + return err + } + req := &logproto.PushRequest{ + Streams: []logproto.Stream{ + stream.Stream, + }, + } + + ctx, cancel := context.WithTimeout(user.InjectOrgID(context.Background(), tenant), t.cfg.ClientConfig.RemoteTimeout) + defer cancel() + _, err = client.(logproto.PatternClient).Push(ctx, req) + if err != nil { + t.ingesterAppends.WithLabelValues(addr, "fail").Inc() + return err + } + t.ingesterAppends.WithLabelValues(addr, "success").Inc() + return nil +} diff --git a/pkg/pattern/testdata/patterns.txt b/pkg/pattern/testdata/patterns.txt new file mode 100644 index 0000000000000..4aa449faf0e2c --- /dev/null +++ b/pkg/pattern/testdata/patterns.txt @@ -0,0 +1,75 @@ +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=3 <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=0 <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=2 <_> <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=6 <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 
level=info msg="batcher: processing aggregation result" <_> partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=wrapper.go:48 level=info component=distributor msg="sample remote write" eventType=bi <_> <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" <_> partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" <_> partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=7 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" 
result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=1 <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=6 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=0 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=2 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=3 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" 
result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=5 <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=4 <_> <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=4 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=1 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation 
result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=7 <_> <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_> +<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=5 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type" +<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_> diff --git a/pkg/querier/handler.go b/pkg/querier/handler.go index 0f3feacc0087b..ee5d0648ac276 100644 --- a/pkg/querier/handler.go +++ b/pkg/querier/handler.go @@ -120,6 +120,14 @@ func (h *Handler) Do(ctx context.Context, req queryrangebase.Request) (queryrang return &queryrange.DetectedFieldsResponse{ Response: result, }, nil + case *logproto.QueryPatternsRequest: + result, err := h.api.PatternsHandler(ctx, concrete) + if err != nil { + return nil, err + } + return &queryrange.QueryPatternsResponse{ + Response: result, + }, nil case *queryrange.DetectedLabelsRequest: result, err := h.api.DetectedLabelsHandler(ctx, &concrete.DetectedLabelsRequest) if err != nil { diff --git a/pkg/querier/http.go b/pkg/querier/http.go index 614fc5e46104d..5d9f216a1463b 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -392,6 +392,19 @@ func (q *QuerierAPI) DetectedFieldsHandler(ctx context.Context, req *logproto.De return resp, nil } +func (q *QuerierAPI) PatternsHandler(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) { + resp, err := q.querier.Patterns(ctx, req) + if err != nil { + return nil, err + } + if resp == nil { // Some stores don't implement this + return &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{}, + }, nil + } + return resp, nil +} + func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, expr syntax.Expr, limit uint32) error { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 3c03cde0653b7..f4ff897e5ab85 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -105,6 +105,7 @@ type Querier interface { IndexShards(ctx context.Context, req *loghttp.RangeQuery, targetBytesPerShard uint64) (*logproto.ShardsResponse, error) Volume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) DetectedFields(ctx context.Context, req *logproto.DetectedFieldsRequest) (*logproto.DetectedFieldsResponse, error) + Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) DetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) } @@ -123,6 +124,7 @@ type SingleTenantQuerier 
struct {
 	store           Store
 	limits          Limits
 	ingesterQuerier *IngesterQuerier
+	patternQuerier  PatternQuerier
 	deleteGetter    deleteGetter
 	metrics         *Metrics
 	logger          log.Logger
@@ -949,6 +951,22 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto.
 	}, nil
 }
 
+type PatternQuerier interface {
+	Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error)
+}
+
+func (q *SingleTenantQuerier) WithPatternQuerier(pq PatternQuerier) {
+	q.patternQuerier = pq
+}
+
+func (q *SingleTenantQuerier) Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) {
+	if q.patternQuerier == nil {
+		return nil, httpgrpc.Errorf(http.StatusNotFound, "")
+	}
+	res, err := q.patternQuerier.Patterns(ctx, req)
+	return res, err
+}
+
 func (q *SingleTenantQuerier) isLabelRelevant(label string, values *logproto.UniqueLabelValues) bool {
 	staticLabels := []string{"pod", "namespace", "cluster", "instance"}
 	cardinality := len(values.Values)
@@ -1000,7 +1018,7 @@ func (q *SingleTenantQuerier) DetectedFields(ctx context.Context, req *logproto.
 		return nil, err
 	}
 
-	//TODO(twhitney): converting from a step to a duration should be abstracted and reused,
+	// TODO(twhitney): converting from a step to a duration should be abstracted and reused,
 	// doing this in a few places now.
 	streams, err := streamsForFieldDetection(iters, req.LineLimit, time.Duration(req.Step*1e6))
 	if err != nil {
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
index 83b1b6e6a8a4e..3d5edc50b8317 100644
--- a/pkg/querier/querier_mock_test.go
+++ b/pkg/querier/querier_mock_test.go
@@ -592,6 +592,18 @@ func (q *querierMock) DetectedFields(ctx context.Context, req *logproto.Detected
 	return resp.(*logproto.DetectedFieldsResponse), err
 }
 
+func (q *querierMock) Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) {
+	args := q.MethodCalled("Patterns", ctx, req)
+
+	resp := args.Get(0)
+	err := args.Error(1)
+	if resp == nil {
+		return nil, err
+	}
+
+	return resp.(*logproto.QueryPatternsResponse), err
+}
+
 func (q *querierMock) DetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) {
 	args := q.MethodCalled("DetectedFields", ctx, req)
diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
index 2d36eafc0a5da..85a77fe859f16 100644
--- a/pkg/querier/queryrange/codec.go
+++ b/pkg/querier/queryrange/codec.go
@@ -480,12 +480,17 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer
 			DetectedFieldsRequest: *req,
 			path:                  r.URL.Path,
 		}, nil
+	case PatternsQueryOp:
+		req, err := loghttp.ParsePatternsQuery(r)
+		if err != nil {
+			return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		}
+		return req, nil
 	case DetectedLabelsOp:
 		req, err := loghttp.ParseDetectedLabelsQuery(r)
 		if err != nil {
 			return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
 		}
-
 		return &DetectedLabelsRequest{
 			DetectedLabelsRequest: *req,
 			path:                  r.URL.Path,
@@ -693,6 +698,12 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest)
 			DetectedFieldsRequest: *req,
 			path:                  httpReq.URL.Path,
 		}, ctx, nil
+	case PatternsQueryOp:
+		req, err := loghttp.ParsePatternsQuery(httpReq)
+		if err != nil {
+			return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		}
+		return req, ctx, nil
 	case DetectedLabelsOp:
 		req, err := loghttp.ParseDetectedLabelsQuery(httpReq)
 		if err != nil {
@@ -978,6 +989,26 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht Header: header, } + return req.WithContext(ctx), nil + case *logproto.QueryPatternsRequest: + params := url.Values{ + "start": []string{fmt.Sprintf("%d", request.Start.UnixNano())}, + "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, + "query": []string{request.GetQuery()}, + } + + u := &url.URL{ + Path: "/loki/api/v1/patterns", + RawQuery: params.Encode(), + } + req := &http.Request{ + Method: "GET", + RequestURI: u.String(), // This is what the httpgrpc code looks at. + URL: u, + Body: http.NoBody, + Header: header, + } + return req.WithContext(ctx), nil case *DetectedLabelsRequest: params := url.Values{ @@ -1026,6 +1057,8 @@ func (c Codec) Path(r queryrangebase.Request) string { return "/loki/api/v1/index/volume_range" case *DetectedFieldsRequest: return "/loki/api/v1/detected_fields" + case *logproto.QueryPatternsRequest: + return "/loki/api/v1/patterns" case *DetectedLabelsRequest: return "/loki/api/v1/detected_labels" } @@ -1137,6 +1170,15 @@ func decodeResponseJSONFrom(buf []byte, req queryrangebase.Request, headers http Response: &resp, Headers: httpResponseHeadersToPromResponseHeaders(headers), }, nil + case *logproto.QueryPatternsRequest: + var resp logproto.QueryPatternsResponse + if err := json.Unmarshal(buf, &resp); err != nil { + return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err) + } + return &QueryPatternsResponse{ + Response: &resp, + Headers: httpResponseHeadersToPromResponseHeaders(headers), + }, nil case *DetectedLabelsRequest: var resp logproto.DetectedLabelsResponse if err := json.Unmarshal(buf, &resp); err != nil { @@ -1368,6 +1410,10 @@ func encodeResponseJSONTo(version loghttp.Version, res queryrangebase.Response, if err := marshal.WriteDetectedFieldsResponseJSON(response.Response, w); err != nil { return err } + case *QueryPatternsResponse: + if err := marshal.WriteQueryPatternsResponseJSON(response.Response, w); err != nil { + return err + } case *DetectedLabelsResponse: if err := marshal.WriteDetectedLabelsResponseJSON(response.Response, w); err != nil { return err diff --git a/pkg/querier/queryrange/extensions.go b/pkg/querier/queryrange/extensions.go index 6e377295283f5..ec5fa25ae308e 100644 --- a/pkg/querier/queryrange/extensions.go +++ b/pkg/querier/queryrange/extensions.go @@ -254,6 +254,14 @@ func (m *DetectedFieldsResponse) WithHeaders(h []queryrangebase.PrometheusRespon return m } +// GetHeaders returns the HTTP headers in the response. +func (m *QueryPatternsResponse) GetHeaders() []*queryrangebase.PrometheusResponseHeader { + if m != nil { + return convertPrometheusResponseHeadersToPointers(m.Headers) + } + return nil +} + // GetHeaders returns the HTTP headers in the response. 
func (m *DetectedLabelsResponse) GetHeaders() []*queryrangebase.PrometheusResponseHeader { if m != nil { @@ -262,6 +270,15 @@ func (m *DetectedLabelsResponse) GetHeaders() []*queryrangebase.PrometheusRespon return nil } +func (m *QueryPatternsResponse) SetHeader(name, value string) { + m.Headers = setHeader(m.Headers, name, value) +} + +func (m *QueryPatternsResponse) WithHeaders(h []queryrangebase.PrometheusResponseHeader) queryrangebase.Response { + m.Headers = h + return m +} + func (m *DetectedLabelsResponse) SetHeader(name, value string) { m.Headers = setHeader(m.Headers, name, value) } diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go index 8dc1751f8d875..b3920e00a6668 100644 --- a/pkg/querier/queryrange/marshal.go +++ b/pkg/querier/queryrange/marshal.go @@ -227,6 +227,8 @@ func QueryResponseUnwrap(res *QueryResponse) (queryrangebase.Response, error) { return concrete.TopkSketches, nil case *QueryResponse_QuantileSketches: return concrete.QuantileSketches, nil + case *QueryResponse_PatternsResponse: + return concrete.PatternsResponse, nil case *QueryResponse_DetectedLabels: return concrete.DetectedLabels, nil case *QueryResponse_DetectedFields: @@ -266,6 +268,8 @@ func QueryResponseWrap(res queryrangebase.Response) (*QueryResponse, error) { p.Response = &QueryResponse_QuantileSketches{response} case *ShardsResponse: p.Response = &QueryResponse_ShardsResponse{response} + case *QueryPatternsResponse: + p.Response = &QueryResponse_PatternsResponse{response} case *DetectedLabelsResponse: p.Response = &QueryResponse_DetectedLabels{response} case *DetectedFieldsResponse: @@ -358,6 +362,8 @@ func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryra return &LabelRequest{ LabelRequest: *concrete.Labels, }, ctx, nil + case *QueryRequest_PatternsRequest: + return concrete.PatternsRequest, ctx, nil case *QueryRequest_DetectedLabels: return &DetectedLabelsRequest{ DetectedLabelsRequest: *concrete.DetectedLabels, @@ -372,7 +378,6 @@ func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryra } func (Codec) QueryRequestWrap(ctx context.Context, r queryrangebase.Request) (*QueryRequest, error) { - result := &QueryRequest{ Metadata: make(map[string]string), } @@ -392,6 +397,8 @@ func (Codec) QueryRequestWrap(ctx context.Context, r queryrangebase.Request) (*Q result.Request = &QueryRequest_Streams{Streams: req} case *logproto.ShardsRequest: result.Request = &QueryRequest_ShardsRequest{ShardsRequest: req} + case *logproto.QueryPatternsRequest: + result.Request = &QueryRequest_PatternsRequest{PatternsRequest: req} case *DetectedLabelsRequest: result.Request = &QueryRequest_DetectedLabels{DetectedLabels: &req.DetectedLabelsRequest} case *DetectedFieldsRequest: diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index 0b7f68b77584f..e78a1054643ad 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -929,6 +929,43 @@ func (m *DetectedFieldsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_DetectedFieldsResponse proto.InternalMessageInfo +type QueryPatternsResponse struct { + Response *github_com_grafana_loki_v3_pkg_logproto.QueryPatternsResponse `protobuf:"bytes,1,opt,name=response,proto3,customtype=github.com/grafana/loki/v3/pkg/logproto.QueryPatternsResponse" json:"response,omitempty"` + Headers []github_com_grafana_loki_v3_pkg_querier_queryrange_queryrangebase_definitions.PrometheusResponseHeader 
`protobuf:"bytes,2,rep,name=Headers,proto3,customtype=github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions.PrometheusResponseHeader" json:"-"` +} + +func (m *QueryPatternsResponse) Reset() { *m = QueryPatternsResponse{} } +func (*QueryPatternsResponse) ProtoMessage() {} +func (*QueryPatternsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_51b9d53b40d11902, []int{15} +} +func (m *QueryPatternsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPatternsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPatternsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPatternsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPatternsResponse.Merge(m, src) +} +func (m *QueryPatternsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPatternsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPatternsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPatternsResponse proto.InternalMessageInfo + type DetectedLabelsResponse struct { Response *github_com_grafana_loki_v3_pkg_logproto.DetectedLabelsResponse `protobuf:"bytes,1,opt,name=response,proto3,customtype=github.com/grafana/loki/v3/pkg/logproto.DetectedLabelsResponse" json:"response,omitempty"` Headers []github_com_grafana_loki_v3_pkg_querier_queryrange_queryrangebase_definitions.PrometheusResponseHeader `protobuf:"bytes,2,rep,name=Headers,proto3,customtype=github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions.PrometheusResponseHeader" json:"-"` @@ -937,7 +974,7 @@ type DetectedLabelsResponse struct { func (m *DetectedLabelsResponse) Reset() { *m = DetectedLabelsResponse{} } func (*DetectedLabelsResponse) ProtoMessage() {} func (*DetectedLabelsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{15} + return fileDescriptor_51b9d53b40d11902, []int{16} } func (m *DetectedLabelsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -979,6 +1016,7 @@ type QueryResponse struct { // *QueryResponse_QuantileSketches // *QueryResponse_ShardsResponse // *QueryResponse_DetectedFields + // *QueryResponse_PatternsResponse // *QueryResponse_DetectedLabels Response isQueryResponse_Response `protobuf_oneof:"response"` } @@ -986,7 +1024,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{16} + return fileDescriptor_51b9d53b40d11902, []int{17} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1052,8 +1090,11 @@ type QueryResponse_ShardsResponse struct { type QueryResponse_DetectedFields struct { DetectedFields *DetectedFieldsResponse `protobuf:"bytes,11,opt,name=detectedFields,proto3,oneof"` } +type QueryResponse_PatternsResponse struct { + PatternsResponse *QueryPatternsResponse `protobuf:"bytes,12,opt,name=patternsResponse,proto3,oneof"` +} type QueryResponse_DetectedLabels struct { - DetectedLabels *DetectedLabelsResponse `protobuf:"bytes,12,opt,name=detectedLabels,proto3,oneof"` + DetectedLabels *DetectedLabelsResponse `protobuf:"bytes,13,opt,name=detectedLabels,proto3,oneof"` } func (*QueryResponse_Series) isQueryResponse_Response() {} @@ -1066,6 +1107,7 @@ func 
(*QueryResponse_TopkSketches) isQueryResponse_Response() {} func (*QueryResponse_QuantileSketches) isQueryResponse_Response() {} func (*QueryResponse_ShardsResponse) isQueryResponse_Response() {} func (*QueryResponse_DetectedFields) isQueryResponse_Response() {} +func (*QueryResponse_PatternsResponse) isQueryResponse_Response() {} func (*QueryResponse_DetectedLabels) isQueryResponse_Response() {} func (m *QueryResponse) GetResponse() isQueryResponse_Response { @@ -1152,6 +1194,13 @@ func (m *QueryResponse) GetDetectedFields() *DetectedFieldsResponse { return nil } +func (m *QueryResponse) GetPatternsResponse() *QueryPatternsResponse { + if x, ok := m.GetResponse().(*QueryResponse_PatternsResponse); ok { + return x.PatternsResponse + } + return nil +} + func (m *QueryResponse) GetDetectedLabels() *DetectedLabelsResponse { if x, ok := m.GetResponse().(*QueryResponse_DetectedLabels); ok { return x.DetectedLabels @@ -1172,6 +1221,7 @@ func (*QueryResponse) XXX_OneofWrappers() []interface{} { (*QueryResponse_QuantileSketches)(nil), (*QueryResponse_ShardsResponse)(nil), (*QueryResponse_DetectedFields)(nil), + (*QueryResponse_PatternsResponse)(nil), (*QueryResponse_DetectedLabels)(nil), } } @@ -1186,6 +1236,7 @@ type QueryRequest struct { // *QueryRequest_Volume // *QueryRequest_ShardsRequest // *QueryRequest_DetectedFields + // *QueryRequest_PatternsRequest // *QueryRequest_DetectedLabels Request isQueryRequest_Request `protobuf_oneof:"request"` Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -1194,7 +1245,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (*QueryRequest) ProtoMessage() {} func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_51b9d53b40d11902, []int{17} + return fileDescriptor_51b9d53b40d11902, []int{18} } func (m *QueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1254,19 +1305,23 @@ type QueryRequest_ShardsRequest struct { type QueryRequest_DetectedFields struct { DetectedFields *logproto.DetectedFieldsRequest `protobuf:"bytes,9,opt,name=detectedFields,proto3,oneof"` } +type QueryRequest_PatternsRequest struct { + PatternsRequest *logproto.QueryPatternsRequest `protobuf:"bytes,10,opt,name=patternsRequest,proto3,oneof"` +} type QueryRequest_DetectedLabels struct { - DetectedLabels *logproto.DetectedLabelsRequest `protobuf:"bytes,10,opt,name=detectedLabels,proto3,oneof"` + DetectedLabels *logproto.DetectedLabelsRequest `protobuf:"bytes,11,opt,name=detectedLabels,proto3,oneof"` } -func (*QueryRequest_Series) isQueryRequest_Request() {} -func (*QueryRequest_Labels) isQueryRequest_Request() {} -func (*QueryRequest_Stats) isQueryRequest_Request() {} -func (*QueryRequest_Instant) isQueryRequest_Request() {} -func (*QueryRequest_Streams) isQueryRequest_Request() {} -func (*QueryRequest_Volume) isQueryRequest_Request() {} -func (*QueryRequest_ShardsRequest) isQueryRequest_Request() {} -func (*QueryRequest_DetectedFields) isQueryRequest_Request() {} -func (*QueryRequest_DetectedLabels) isQueryRequest_Request() {} +func (*QueryRequest_Series) isQueryRequest_Request() {} +func (*QueryRequest_Labels) isQueryRequest_Request() {} +func (*QueryRequest_Stats) isQueryRequest_Request() {} +func (*QueryRequest_Instant) isQueryRequest_Request() {} +func (*QueryRequest_Streams) isQueryRequest_Request() {} +func (*QueryRequest_Volume) isQueryRequest_Request() {} +func 
(*QueryRequest_ShardsRequest) isQueryRequest_Request() {} +func (*QueryRequest_DetectedFields) isQueryRequest_Request() {} +func (*QueryRequest_PatternsRequest) isQueryRequest_Request() {} +func (*QueryRequest_DetectedLabels) isQueryRequest_Request() {} func (m *QueryRequest) GetRequest() isQueryRequest_Request { if m != nil { @@ -1331,6 +1386,13 @@ func (m *QueryRequest) GetDetectedFields() *logproto.DetectedFieldsRequest { return nil } +func (m *QueryRequest) GetPatternsRequest() *logproto.QueryPatternsRequest { + if x, ok := m.GetRequest().(*QueryRequest_PatternsRequest); ok { + return x.PatternsRequest + } + return nil +} + func (m *QueryRequest) GetDetectedLabels() *logproto.DetectedLabelsRequest { if x, ok := m.GetRequest().(*QueryRequest_DetectedLabels); ok { return x.DetectedLabels @@ -1356,6 +1418,7 @@ func (*QueryRequest) XXX_OneofWrappers() []interface{} { (*QueryRequest_Volume)(nil), (*QueryRequest_ShardsRequest)(nil), (*QueryRequest_DetectedFields)(nil), + (*QueryRequest_PatternsRequest)(nil), (*QueryRequest_DetectedLabels)(nil), } } @@ -1376,6 +1439,7 @@ func init() { proto.RegisterType((*QuantileSketchResponse)(nil), "queryrange.QuantileSketchResponse") proto.RegisterType((*ShardsResponse)(nil), "queryrange.ShardsResponse") proto.RegisterType((*DetectedFieldsResponse)(nil), "queryrange.DetectedFieldsResponse") + proto.RegisterType((*QueryPatternsResponse)(nil), "queryrange.QueryPatternsResponse") proto.RegisterType((*DetectedLabelsResponse)(nil), "queryrange.DetectedLabelsResponse") proto.RegisterType((*QueryResponse)(nil), "queryrange.QueryResponse") proto.RegisterType((*QueryRequest)(nil), "queryrange.QueryRequest") @@ -1387,117 +1451,122 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 1751 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x6f, 0xdb, 0xc8, - 0x15, 0x17, 0xf5, 0x69, 0x8d, 0x3f, 0xea, 0x8e, 0x0d, 0x87, 0x75, 0x12, 0x51, 0x10, 0xd0, 0xc4, - 0x2d, 0x5a, 0xa9, 0x91, 0x13, 0x37, 0x71, 0x83, 0x20, 0x61, 0x9d, 0x40, 0x46, 0x93, 0x22, 0xa1, - 0x8d, 0x1e, 0x7a, 0x29, 0xc6, 0xd2, 0x58, 0x62, 0x4d, 0x91, 0x34, 0x39, 0x72, 0x62, 0xa0, 0x87, - 0xfc, 0x03, 0x45, 0x03, 0xf4, 0x7f, 0x28, 0x7a, 0x2b, 0x0a, 0xf4, 0xd4, 0x53, 0x7b, 0x0b, 0x0a, - 0x14, 0xc8, 0x31, 0x10, 0x50, 0xb6, 0x51, 0x80, 0x62, 0xe1, 0x53, 0x80, 0xbd, 0xee, 0x61, 0x31, - 0x1f, 0xa4, 0x86, 0xa2, 0xb4, 0x96, 0xb2, 0xd8, 0x83, 0x77, 0xf7, 0x62, 0x0f, 0x67, 0xde, 0xef, - 0x71, 0xf8, 0x7b, 0xbf, 0xf7, 0xf4, 0x66, 0xc0, 0x75, 0xf7, 0xa8, 0x5d, 0x3b, 0xee, 0x61, 0xcf, - 0xc4, 0x1e, 0xfb, 0x7f, 0xea, 0x21, 0xbb, 0x8d, 0xa5, 0x61, 0xd5, 0xf5, 0x1c, 0xe2, 0x40, 0x30, - 0x9c, 0x59, 0xaf, 0xb7, 0x4d, 0xd2, 0xe9, 0x1d, 0x54, 0x9b, 0x4e, 0xb7, 0xd6, 0x76, 0xda, 0x4e, - 0xad, 0xed, 0x38, 0x6d, 0x0b, 0x23, 0xd7, 0xf4, 0xc5, 0xb0, 0xe6, 0xb9, 0xcd, 0x9a, 0x4f, 0x10, - 0xe9, 0xf9, 0x1c, 0xbf, 0xbe, 0x4a, 0x0d, 0xd9, 0x90, 0x41, 0xc4, 0xac, 0x26, 0xcc, 0xd9, 0xd3, - 0x41, 0xef, 0xb0, 0x46, 0xcc, 0x2e, 0xf6, 0x09, 0xea, 0xba, 0xa1, 0x01, 0xdd, 0x9f, 0xe5, 0xb4, - 0x39, 0xd2, 0xb4, 0x5b, 0xf8, 0x45, 0x1b, 0x11, 0xfc, 0x1c, 0x9d, 0x0a, 0x83, 0xcb, 0x31, 0x83, - 0x70, 0x20, 0x16, 0xbf, 0x17, 0x5b, 0xf4, 0x8f, 0x30, 0x69, 0x76, 0xc4, 0x52, 0x59, 0x2c, 0x1d, - 0x5b, 0x5d, 0xa7, 0x85, 0x2d, 0xb6, 0x59, 0x9f, 0xff, 0x15, 0x16, 0x2b, 0xd4, 0xc2, 0xed, 0xf9, - 0x1d, 0xf6, 0x47, 0x4c, 0xfe, 0xfc, 0x5c, 0xbe, 0x0e, 0x90, 0x8f, 0x6b, 0x2d, 0x7c, 0x68, 0xda, - 0x26, 0x31, 0x1d, 0xdb, 0x97, 0xc7, 0xc2, 0xc9, 0xd6, 0x74, 0x4e, 0x46, 0x63, 0x50, 
0xf9, 0x6b, - 0x06, 0xcc, 0x3f, 0x76, 0x8e, 0x4c, 0x03, 0x1f, 0xf7, 0xb0, 0x4f, 0xe0, 0x2a, 0xc8, 0x31, 0x1b, - 0x55, 0x29, 0x2b, 0x1b, 0x45, 0x83, 0x3f, 0xd0, 0x59, 0xcb, 0xec, 0x9a, 0x44, 0x4d, 0x97, 0x95, - 0x8d, 0x45, 0x83, 0x3f, 0x40, 0x08, 0xb2, 0x3e, 0xc1, 0xae, 0x9a, 0x29, 0x2b, 0x1b, 0x19, 0x83, - 0x8d, 0xe1, 0x3a, 0x98, 0x33, 0x6d, 0x82, 0xbd, 0x13, 0x64, 0xa9, 0x45, 0x36, 0x1f, 0x3d, 0xc3, - 0x7b, 0xa0, 0xe0, 0x13, 0xe4, 0x91, 0x7d, 0x5f, 0xcd, 0x96, 0x95, 0x8d, 0xf9, 0xfa, 0x7a, 0x95, - 0xc7, 0xaa, 0x1a, 0xc6, 0xaa, 0xba, 0x1f, 0xc6, 0x4a, 0x9f, 0x7b, 0x1d, 0x68, 0xa9, 0x57, 0xff, - 0xd5, 0x14, 0x23, 0x04, 0xc1, 0x6d, 0x90, 0xc3, 0x76, 0x6b, 0xdf, 0x57, 0x73, 0x33, 0xa0, 0x39, - 0x04, 0xde, 0x00, 0xc5, 0x96, 0xe9, 0xe1, 0x26, 0xe5, 0x4c, 0xcd, 0x97, 0x95, 0x8d, 0xa5, 0xfa, - 0x4a, 0x35, 0x0a, 0xed, 0x4e, 0xb8, 0x64, 0x0c, 0xad, 0xe8, 0xe7, 0xb9, 0x88, 0x74, 0xd4, 0x02, - 0x63, 0x82, 0x8d, 0x61, 0x05, 0xe4, 0xfd, 0x0e, 0xf2, 0x5a, 0xbe, 0x3a, 0x57, 0xce, 0x6c, 0x14, - 0x75, 0x70, 0x16, 0x68, 0x62, 0xc6, 0x10, 0xff, 0xe1, 0x6f, 0x40, 0xd6, 0xb5, 0x90, 0xad, 0x02, - 0xb6, 0xcb, 0xe5, 0xaa, 0xc4, 0xf9, 0x53, 0x0b, 0xd9, 0xfa, 0x9d, 0x7e, 0xa0, 0xdd, 0x92, 0xe5, - 0xee, 0xa1, 0x43, 0x64, 0xa3, 0x9a, 0xe5, 0x1c, 0x99, 0xb5, 0x93, 0xcd, 0x9a, 0x1c, 0x49, 0xea, - 0xa8, 0xfa, 0x8c, 0x3a, 0xa0, 0x50, 0x83, 0x39, 0xae, 0xfc, 0x2b, 0x0d, 0x20, 0x8d, 0xd9, 0xae, - 0xed, 0x13, 0x64, 0x93, 0x8f, 0x09, 0xdd, 0x5d, 0x90, 0xa7, 0x69, 0xb1, 0xef, 0xb3, 0xe0, 0x4d, - 0xcb, 0xa5, 0xc0, 0xc4, 0xc9, 0xcc, 0xce, 0x44, 0x66, 0x6e, 0x2c, 0x99, 0xf9, 0x73, 0xc9, 0x2c, - 0x7c, 0x55, 0x64, 0xaa, 0x20, 0x4b, 0x9f, 0xe0, 0x32, 0xc8, 0x78, 0xe8, 0x39, 0xe3, 0x6e, 0xc1, - 0xa0, 0xc3, 0xca, 0x20, 0x0b, 0x16, 0x78, 0x6a, 0xf8, 0xae, 0x63, 0xfb, 0x98, 0xee, 0x77, 0x8f, - 0xd5, 0x1f, 0xce, 0xb0, 0xd8, 0x2f, 0x9b, 0x31, 0xc4, 0x0a, 0xbc, 0x0f, 0xb2, 0x3b, 0x88, 0x20, - 0xc6, 0xf6, 0x7c, 0x7d, 0x55, 0xde, 0x2f, 0xf5, 0x45, 0xd7, 0xf4, 0x35, 0x4a, 0xe8, 0x59, 0xa0, - 0x2d, 0xb5, 0x10, 0x41, 0x3f, 0x72, 0xba, 0x26, 0xc1, 0x5d, 0x97, 0x9c, 0x1a, 0x0c, 0x09, 0x6f, - 0x81, 0xe2, 0x43, 0xcf, 0x73, 0xbc, 0xfd, 0x53, 0x17, 0xb3, 0xe8, 0x14, 0xf5, 0x4b, 0x67, 0x81, - 0xb6, 0x82, 0xc3, 0x49, 0x09, 0x31, 0xb4, 0x84, 0x3f, 0x00, 0x39, 0xf6, 0xc0, 0xe2, 0x51, 0xd4, - 0x57, 0xce, 0x02, 0xed, 0x3b, 0x0c, 0x22, 0x99, 0x73, 0x8b, 0x78, 0xf8, 0x72, 0x53, 0x85, 0x2f, - 0x52, 0x51, 0x5e, 0x56, 0x91, 0x0a, 0x0a, 0x27, 0xd8, 0xf3, 0xa9, 0x9b, 0x02, 0x9b, 0x0f, 0x1f, - 0xe1, 0x03, 0x00, 0x28, 0x31, 0xa6, 0x4f, 0xcc, 0x26, 0xcd, 0x15, 0x4a, 0xc6, 0x62, 0x95, 0x97, - 0x42, 0x03, 0xfb, 0x3d, 0x8b, 0xe8, 0x50, 0xb0, 0x20, 0x19, 0x1a, 0xd2, 0x18, 0xfe, 0x45, 0x01, - 0x85, 0x06, 0x46, 0x2d, 0xec, 0xf9, 0x6a, 0xb1, 0x9c, 0xd9, 0x98, 0xaf, 0x7f, 0xbf, 0x2a, 0xd7, - 0xbd, 0xa7, 0x9e, 0xd3, 0xc5, 0xa4, 0x83, 0x7b, 0x7e, 0x18, 0x20, 0x6e, 0xad, 0xdb, 0xfd, 0x40, - 0xc3, 0x53, 0x4a, 0x62, 0xaa, 0x72, 0x3b, 0xf1, 0x55, 0x67, 0x81, 0xa6, 0xfc, 0xd8, 0x08, 0x77, - 0x09, 0xeb, 0x60, 0xee, 0x39, 0xf2, 0x6c, 0xd3, 0x6e, 0xfb, 0x2a, 0x60, 0x8a, 0x5e, 0x3b, 0x0b, - 0x34, 0x18, 0xce, 0x49, 0x81, 0x88, 0xec, 0x2a, 0xff, 0x51, 0xc0, 0x77, 0xa9, 0x30, 0xf6, 0xe8, - 0x7e, 0x7c, 0x29, 0x95, 0xbb, 0x88, 0x34, 0x3b, 0xaa, 0x42, 0xdd, 0x18, 0xfc, 0x41, 0xae, 0x9f, - 0xe9, 0x2f, 0x55, 0x3f, 0x33, 0xb3, 0xd7, 0xcf, 0x30, 0x7f, 0xb3, 0x63, 0xf3, 0x37, 0x37, 0x29, - 0x7f, 0x2b, 0x7f, 0xc8, 0xf0, 0x5a, 0x15, 0x7e, 0xdf, 0x0c, 0xa9, 0xf4, 0x28, 0x4a, 0xa5, 0x0c, - 0xdb, 0x6d, 0xa4, 0x50, 0xee, 0x6b, 0xb7, 0x85, 0x6d, 0x62, 0x1e, 0x9a, 0xd8, 0x3b, 0x27, 0xa1, - 0x24, 0x95, 
0x66, 0xe2, 0x2a, 0x95, 0x25, 0x96, 0xbd, 0x10, 0x12, 0x8b, 0xe7, 0x55, 0xee, 0x23, - 0xf2, 0xaa, 0xf2, 0x69, 0x1a, 0xac, 0xd1, 0x88, 0x3c, 0x46, 0x07, 0xd8, 0xfa, 0x25, 0xea, 0xce, - 0x18, 0x95, 0x6b, 0x52, 0x54, 0x8a, 0x3a, 0xfc, 0x96, 0xf5, 0xe9, 0x58, 0xff, 0x93, 0x02, 0xe6, - 0xc2, 0x1f, 0x00, 0x58, 0x05, 0x80, 0xc3, 0x58, 0x8d, 0xe7, 0x5c, 0x2f, 0x51, 0xb0, 0x17, 0xcd, - 0x1a, 0x92, 0x05, 0xfc, 0x2d, 0xc8, 0xf3, 0x27, 0x91, 0x0b, 0x97, 0xa4, 0x5c, 0x20, 0x1e, 0x46, - 0xdd, 0x07, 0x2d, 0xe4, 0x12, 0xec, 0xe9, 0x77, 0xe8, 0x2e, 0xfa, 0x81, 0x76, 0x7d, 0x12, 0x4b, - 0x61, 0xff, 0x29, 0x70, 0x34, 0xbe, 0xfc, 0x9d, 0x86, 0x78, 0x43, 0xe5, 0xf7, 0x0a, 0x58, 0xa6, - 0x1b, 0xa5, 0xd4, 0x44, 0xc2, 0xd8, 0x01, 0x73, 0x9e, 0x18, 0xb3, 0xed, 0xce, 0xd7, 0x2b, 0xd5, - 0x38, 0xad, 0x63, 0xa8, 0xd4, 0xb3, 0xaf, 0x03, 0x4d, 0x31, 0x22, 0x24, 0xdc, 0x8c, 0xd1, 0x98, - 0x1e, 0x47, 0x23, 0x85, 0xa4, 0x62, 0xc4, 0xfd, 0x23, 0x0d, 0xe0, 0x2e, 0xed, 0xd1, 0xa9, 0xfe, - 0x86, 0x52, 0x7d, 0x91, 0xd8, 0xd1, 0x95, 0x21, 0x29, 0x49, 0x7b, 0xfd, 0x5e, 0x3f, 0xd0, 0xb6, - 0xcf, 0xd1, 0xce, 0x17, 0xe0, 0xa5, 0xaf, 0x90, 0xe5, 0x9b, 0xbe, 0x08, 0xf2, 0xad, 0xfc, 0x2d, - 0x0d, 0x96, 0x7e, 0xe5, 0x58, 0xbd, 0x2e, 0x8e, 0xe8, 0x73, 0x13, 0xf4, 0xa9, 0x43, 0xfa, 0xe2, - 0xb6, 0xfa, 0x76, 0x3f, 0xd0, 0xb6, 0xa6, 0xa5, 0x2e, 0x8e, 0xbd, 0xd0, 0xb4, 0xfd, 0x3f, 0x0d, - 0x56, 0xf7, 0x1d, 0xf7, 0x17, 0x7b, 0xec, 0x8c, 0x27, 0x95, 0xc9, 0x4e, 0x82, 0xbc, 0xd5, 0x21, - 0x79, 0x14, 0xf1, 0x04, 0x11, 0xcf, 0x7c, 0xa1, 0x6f, 0xf5, 0x03, 0xad, 0x3e, 0x2d, 0x71, 0x43, - 0xdc, 0x45, 0x26, 0x2d, 0xd6, 0x03, 0x65, 0xa6, 0xec, 0x81, 0x3e, 0x4b, 0x83, 0xb5, 0x67, 0x3d, - 0x64, 0x13, 0xd3, 0xc2, 0x9c, 0xec, 0x88, 0xea, 0xdf, 0x25, 0xa8, 0x2e, 0x0d, 0xa9, 0x8e, 0x63, - 0x04, 0xe9, 0xf7, 0xfb, 0x81, 0x76, 0x77, 0x5a, 0xd2, 0xc7, 0x79, 0xf8, 0xc6, 0xd1, 0xff, 0xf7, - 0x34, 0x58, 0xda, 0xe3, 0x5d, 0x5b, 0xf8, 0xe1, 0x27, 0x63, 0x68, 0x97, 0x2f, 0x4a, 0xdc, 0x83, - 0x6a, 0x1c, 0x31, 0x5b, 0x91, 0x88, 0x63, 0x2f, 0x74, 0x91, 0xf8, 0x77, 0x1a, 0xac, 0xed, 0x60, - 0x82, 0x9b, 0x04, 0xb7, 0x1e, 0x99, 0xd8, 0x92, 0x48, 0x7c, 0xa9, 0x24, 0x58, 0x2c, 0x4b, 0xc7, - 0xac, 0xb1, 0x20, 0x5d, 0xef, 0x07, 0xda, 0xbd, 0x69, 0x79, 0x1c, 0xef, 0xe3, 0x6b, 0xc3, 0x27, - 0xeb, 0x50, 0x67, 0xe5, 0x33, 0x0e, 0xfa, 0x38, 0x3e, 0xe3, 0x3e, 0x2e, 0x34, 0x9f, 0x7f, 0xcc, - 0x83, 0x45, 0x76, 0xe5, 0x11, 0xd1, 0xf8, 0x43, 0x20, 0x5a, 0x7a, 0xc1, 0x21, 0x0c, 0x8f, 0x81, - 0x9e, 0xdb, 0xac, 0xee, 0x89, 0x66, 0x9f, 0x5b, 0xc0, 0xdb, 0x20, 0xef, 0xb3, 0xc3, 0x96, 0xe8, - 0xd6, 0x4a, 0xa3, 0xf7, 0x19, 0xf1, 0x63, 0x5d, 0x23, 0x65, 0x08, 0x7b, 0x78, 0x17, 0xe4, 0x2d, - 0xc6, 0xa2, 0x38, 0x6c, 0x56, 0x46, 0x91, 0xc9, 0xe3, 0x07, 0x45, 0x73, 0x0c, 0xdc, 0x02, 0x39, - 0xd6, 0x16, 0x8a, 0x7b, 0xc2, 0xd8, 0x6b, 0x93, 0xcd, 0x59, 0x23, 0x65, 0x70, 0x73, 0x58, 0x07, - 0x59, 0xd7, 0x73, 0xba, 0xa2, 0x45, 0xbf, 0x32, 0xfa, 0x4e, 0xb9, 0xa7, 0x6d, 0xa4, 0x0c, 0x66, - 0x0b, 0x6f, 0xd2, 0x53, 0x35, 0x6d, 0x86, 0x7d, 0x76, 0xb9, 0x41, 0x3b, 0xa1, 0x11, 0x98, 0x04, - 0x09, 0x4d, 0xe1, 0x4d, 0x90, 0x3f, 0x61, 0xad, 0x8e, 0xb8, 0x99, 0x5a, 0x97, 0x41, 0xf1, 0x26, - 0x88, 0x7e, 0x17, 0xb7, 0x85, 0x8f, 0xc0, 0x02, 0x71, 0xdc, 0xa3, 0xb0, 0xa3, 0x10, 0x17, 0x23, - 0x65, 0x19, 0x3b, 0xae, 0xe3, 0x68, 0xa4, 0x8c, 0x18, 0x0e, 0x3e, 0x05, 0xcb, 0xc7, 0xb1, 0x9f, - 0x2e, 0xec, 0xb3, 0xdb, 0xd6, 0x11, 0x9e, 0xc7, 0xff, 0xa8, 0x36, 0x52, 0x46, 0x02, 0x0d, 0x77, - 0xc0, 0x92, 0x1f, 0xab, 0xca, 0xe2, 0xfa, 0x32, 0xf6, 0x5d, 0xf1, 0xba, 0xdd, 0x48, 0x19, 0x23, - 0x18, 0xf8, 0x18, 0x2c, 0xb5, 0x62, 
0x35, 0x49, 0x9d, 0x4f, 0xee, 0x6a, 0x7c, 0xd5, 0xa2, 0xde, - 0xe2, 0x58, 0xd9, 0x1b, 0xcf, 0x48, 0x75, 0x61, 0xb2, 0xb7, 0x78, 0xce, 0xca, 0xde, 0xf8, 0x8a, - 0x0e, 0x86, 0xd5, 0xa3, 0xf2, 0xcf, 0x1c, 0x58, 0x10, 0x59, 0xc1, 0x2f, 0x5c, 0x7e, 0x1a, 0x09, - 0x9d, 0x27, 0xc5, 0xd5, 0x49, 0x42, 0x67, 0xe6, 0x92, 0xce, 0x7f, 0x12, 0xe9, 0x9c, 0x67, 0xc8, - 0xda, 0xb0, 0x22, 0xb1, 0xf7, 0x4a, 0x08, 0xa1, 0xed, 0xcd, 0x50, 0xdb, 0x3c, 0x31, 0x2e, 0x8f, - 0x3f, 0xb6, 0x84, 0x28, 0x21, 0xec, 0x6d, 0x50, 0x30, 0xf9, 0x6d, 0xef, 0xb8, 0x94, 0x48, 0x5e, - 0x06, 0x53, 0xa9, 0x0a, 0x00, 0xdc, 0x1c, 0x0a, 0x9c, 0xe7, 0xc5, 0xa5, 0xa4, 0xc0, 0x23, 0x50, - 0xa8, 0xef, 0x1b, 0x91, 0xbe, 0xf3, 0x02, 0x93, 0x68, 0xf1, 0xa3, 0x0f, 0x13, 0xe2, 0x7e, 0x08, - 0x16, 0x43, 0x39, 0xb0, 0x25, 0xa1, 0xee, 0xab, 0x93, 0x3a, 0x87, 0x10, 0x1f, 0x47, 0xc1, 0xdd, - 0x84, 0x86, 0xb8, 0xb2, 0xb5, 0xc9, 0xbf, 0x9d, 0xa1, 0xa7, 0x51, 0x01, 0xed, 0x26, 0x04, 0x04, - 0x26, 0xb9, 0x0a, 0xe5, 0x93, 0x70, 0xc5, 0x17, 0x60, 0x03, 0xcc, 0x75, 0x31, 0x41, 0x2d, 0x44, - 0x90, 0x5a, 0x60, 0x95, 0xff, 0x5a, 0x3c, 0xd3, 0x86, 0x62, 0xaa, 0x3e, 0x11, 0x86, 0x0f, 0x6d, - 0xe2, 0x9d, 0x8a, 0x23, 0x6d, 0x84, 0x5e, 0xff, 0x19, 0x58, 0x8c, 0x19, 0xc0, 0x65, 0x90, 0x39, - 0xc2, 0xe1, 0xad, 0x3d, 0x1d, 0xc2, 0x55, 0x90, 0x3b, 0x41, 0x56, 0x0f, 0x33, 0x4d, 0x15, 0x0d, - 0xfe, 0xb0, 0x9d, 0xbe, 0xad, 0xe8, 0x45, 0x50, 0xf0, 0xf8, 0x5b, 0xf4, 0xf6, 0x9b, 0x77, 0xa5, - 0xd4, 0xdb, 0x77, 0xa5, 0xd4, 0x87, 0x77, 0x25, 0xe5, 0xe5, 0xa0, 0xa4, 0xfc, 0x79, 0x50, 0x52, - 0x5e, 0x0f, 0x4a, 0xca, 0x9b, 0x41, 0x49, 0xf9, 0xdf, 0xa0, 0xa4, 0x7c, 0x32, 0x28, 0xa5, 0x3e, - 0x0c, 0x4a, 0xca, 0xab, 0xf7, 0xa5, 0xd4, 0x9b, 0xf7, 0xa5, 0xd4, 0xdb, 0xf7, 0xa5, 0xd4, 0xaf, - 0x6f, 0xcc, 0xfc, 0x23, 0x74, 0x90, 0x67, 0x4c, 0x6d, 0x7e, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x8e, - 0xd2, 0x3a, 0x43, 0xd8, 0x1b, 0x00, 0x00, + // 1836 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x9f, 0x9e, 0x4f, 0xcf, 0xf3, 0xc7, 0x9a, 0x8a, 0x71, 0x1a, 0xef, 0xee, 0xf4, 0x30, 0x12, + 0xbb, 0x06, 0xc1, 0x0c, 0x19, 0xef, 0x86, 0x5d, 0x13, 0xa2, 0x4d, 0xe3, 0x44, 0x0e, 0x64, 0x21, + 0xdb, 0xb6, 0x38, 0x70, 0x41, 0x65, 0x4f, 0x65, 0xdc, 0x78, 0xa6, 0xbb, 0xd3, 0x5d, 0xe3, 0xc4, + 0x12, 0x42, 0xfb, 0x0f, 0xac, 0xd8, 0xbf, 0x02, 0x71, 0x43, 0x48, 0x9c, 0x38, 0x71, 0x0c, 0x48, + 0x48, 0x39, 0xae, 0x46, 0xa2, 0x21, 0x0e, 0x42, 0xc8, 0xa7, 0x48, 0x5c, 0x39, 0xa0, 0xfa, 0xe8, + 0x9e, 0xaa, 0xe9, 0x36, 0x99, 0x09, 0xe2, 0x60, 0xe0, 0xe2, 0xa9, 0xae, 0x7a, 0xbf, 0xea, 0x57, + 0xbf, 0xf7, 0x7e, 0xaf, 0xab, 0xca, 0xf0, 0x76, 0x70, 0xdc, 0xef, 0x3c, 0x1c, 0x91, 0xd0, 0x25, + 0x21, 0xff, 0x3d, 0x0d, 0xb1, 0xd7, 0x27, 0x4a, 0xb3, 0x1d, 0x84, 0x3e, 0xf5, 0x11, 0x4c, 0x7a, + 0x36, 0xba, 0x7d, 0x97, 0x1e, 0x8d, 0x0e, 0xda, 0x87, 0xfe, 0xb0, 0xd3, 0xf7, 0xfb, 0x7e, 0xa7, + 0xef, 0xfb, 0xfd, 0x01, 0xc1, 0x81, 0x1b, 0xc9, 0x66, 0x27, 0x0c, 0x0e, 0x3b, 0x11, 0xc5, 0x74, + 0x14, 0x09, 0xfc, 0xc6, 0x1a, 0x33, 0xe4, 0x4d, 0x0e, 0x91, 0xbd, 0x96, 0x34, 0xe7, 0x4f, 0x07, + 0xa3, 0x07, 0x1d, 0xea, 0x0e, 0x49, 0x44, 0xf1, 0x30, 0x48, 0x0c, 0x98, 0x7f, 0x03, 0xbf, 0x2f, + 0x90, 0xae, 0xd7, 0x23, 0x8f, 0xfb, 0x98, 0x92, 0x47, 0xf8, 0x54, 0x1a, 0xbc, 0xae, 0x19, 0x24, + 0x0d, 0x39, 0xb8, 0xa1, 0x0d, 0x06, 0x98, 0x52, 0x12, 0x7a, 0x72, 0xec, 0x0b, 0xda, 0x58, 0x74, + 0x4c, 0xe8, 0xe1, 0x91, 0x1c, 0x6a, 0xca, 0xa1, 0x87, 0x83, 0xa1, 0xdf, 0x23, 0x03, 0xbe, 0x90, + 0x48, 0xfc, 0x95, 0x16, 0x57, 0x98, 0x45, 0x30, 0x8a, 0x8e, 0xf8, 
0x1f, 0xd9, 0xf9, 0xed, 0x97, + 0x72, 0x79, 0x80, 0x23, 0xd2, 0xe9, 0x91, 0x07, 0xae, 0xe7, 0x52, 0xd7, 0xf7, 0x22, 0xb5, 0x2d, + 0x27, 0xb9, 0x3e, 0xdb, 0x24, 0xd3, 0xf1, 0x69, 0xfd, 0xaa, 0x04, 0x8b, 0xf7, 0xfc, 0x63, 0xd7, + 0x21, 0x0f, 0x47, 0x24, 0xa2, 0x68, 0x0d, 0x2a, 0xdc, 0xc6, 0x34, 0x9a, 0xc6, 0x66, 0xdd, 0x11, + 0x0f, 0xac, 0x77, 0xe0, 0x0e, 0x5d, 0x6a, 0x16, 0x9b, 0xc6, 0xe6, 0xb2, 0x23, 0x1e, 0x10, 0x82, + 0x72, 0x44, 0x49, 0x60, 0x96, 0x9a, 0xc6, 0x66, 0xc9, 0xe1, 0x6d, 0xb4, 0x01, 0x0b, 0xae, 0x47, + 0x49, 0x78, 0x82, 0x07, 0x66, 0x9d, 0xf7, 0xa7, 0xcf, 0xe8, 0x26, 0xd4, 0x22, 0x8a, 0x43, 0xba, + 0x1f, 0x99, 0xe5, 0xa6, 0xb1, 0xb9, 0xd8, 0xdd, 0x68, 0x8b, 0x38, 0xb6, 0x93, 0x38, 0xb6, 0xf7, + 0x93, 0x38, 0xda, 0x0b, 0x4f, 0x62, 0xab, 0xf0, 0xe9, 0x9f, 0x2c, 0xc3, 0x49, 0x40, 0x68, 0x1b, + 0x2a, 0xc4, 0xeb, 0xed, 0x47, 0x66, 0x65, 0x0e, 0xb4, 0x80, 0xa0, 0x6b, 0x50, 0xef, 0xb9, 0x21, + 0x39, 0x64, 0x9c, 0x99, 0xd5, 0xa6, 0xb1, 0xb9, 0xd2, 0xbd, 0xd2, 0x4e, 0xc3, 0xbe, 0x93, 0x0c, + 0x39, 0x13, 0x2b, 0xb6, 0xbc, 0x00, 0xd3, 0x23, 0xb3, 0xc6, 0x99, 0xe0, 0x6d, 0xd4, 0x82, 0x6a, + 0x74, 0x84, 0xc3, 0x5e, 0x64, 0x2e, 0x34, 0x4b, 0x9b, 0x75, 0x1b, 0xce, 0x63, 0x4b, 0xf6, 0x38, + 0xf2, 0x17, 0xfd, 0x08, 0xca, 0xc1, 0x00, 0x7b, 0x26, 0x70, 0x2f, 0x57, 0xdb, 0x0a, 0xe7, 0xf7, + 0x07, 0xd8, 0xb3, 0xdf, 0x1f, 0xc7, 0xd6, 0xbb, 0xaa, 0x14, 0x42, 0xfc, 0x00, 0x7b, 0xb8, 0x33, + 0xf0, 0x8f, 0xdd, 0xce, 0xc9, 0x56, 0x47, 0x8d, 0x24, 0x9b, 0xa8, 0xfd, 0x11, 0x9b, 0x80, 0x41, + 0x1d, 0x3e, 0x71, 0xeb, 0xf7, 0x45, 0x40, 0x2c, 0x66, 0x77, 0xbd, 0x88, 0x62, 0x8f, 0xbe, 0x4a, + 0xe8, 0x6e, 0x40, 0x95, 0x49, 0x66, 0x3f, 0xe2, 0xc1, 0x9b, 0x95, 0x4b, 0x89, 0xd1, 0xc9, 0x2c, + 0xcf, 0x45, 0x66, 0x25, 0x97, 0xcc, 0xea, 0x4b, 0xc9, 0xac, 0xfd, 0xa7, 0xc8, 0x34, 0xa1, 0xcc, + 0x9e, 0xd0, 0x2a, 0x94, 0x42, 0xfc, 0x88, 0x73, 0xb7, 0xe4, 0xb0, 0x66, 0xeb, 0xac, 0x0c, 0x4b, + 0x42, 0x1a, 0x51, 0xe0, 0x7b, 0x11, 0x61, 0xfe, 0xee, 0xf1, 0xda, 0x24, 0x18, 0x96, 0xfe, 0xf2, + 0x1e, 0x47, 0x8e, 0xa0, 0x0f, 0xa0, 0xbc, 0x83, 0x29, 0xe6, 0x6c, 0x2f, 0x76, 0xd7, 0x54, 0x7f, + 0xd9, 0x5c, 0x6c, 0xcc, 0x5e, 0x67, 0x84, 0x9e, 0xc7, 0xd6, 0x4a, 0x0f, 0x53, 0xfc, 0x55, 0x7f, + 0xe8, 0x52, 0x32, 0x0c, 0xe8, 0xa9, 0xc3, 0x91, 0xe8, 0x5d, 0xa8, 0xdf, 0x0e, 0x43, 0x3f, 0xdc, + 0x3f, 0x0d, 0x08, 0x8f, 0x4e, 0xdd, 0xbe, 0x7a, 0x1e, 0x5b, 0x57, 0x48, 0xd2, 0xa9, 0x20, 0x26, + 0x96, 0xe8, 0xcb, 0x50, 0xe1, 0x0f, 0x3c, 0x1e, 0x75, 0xfb, 0xca, 0x79, 0x6c, 0xbd, 0xc6, 0x21, + 0x8a, 0xb9, 0xb0, 0xd0, 0xc3, 0x57, 0x99, 0x29, 0x7c, 0x69, 0x16, 0x55, 0xd5, 0x2c, 0x32, 0xa1, + 0x76, 0x42, 0xc2, 0x88, 0x4d, 0x53, 0xe3, 0xfd, 0xc9, 0x23, 0xba, 0x05, 0xc0, 0x88, 0x71, 0x23, + 0xea, 0x1e, 0x32, 0xad, 0x30, 0x32, 0x96, 0xdb, 0xa2, 0x14, 0x3a, 0x24, 0x1a, 0x0d, 0xa8, 0x8d, + 0x24, 0x0b, 0x8a, 0xa1, 0xa3, 0xb4, 0xd1, 0x2f, 0x0d, 0xa8, 0xed, 0x12, 0xdc, 0x23, 0x61, 0x64, + 0xd6, 0x9b, 0xa5, 0xcd, 0xc5, 0xee, 0x97, 0xda, 0x6a, 0xdd, 0xbb, 0x1f, 0xfa, 0x43, 0x42, 0x8f, + 0xc8, 0x28, 0x4a, 0x02, 0x24, 0xac, 0x6d, 0x6f, 0x1c, 0x5b, 0x64, 0xc6, 0x94, 0x98, 0xa9, 0xdc, + 0x5e, 0xf8, 0xaa, 0xf3, 0xd8, 0x32, 0xbe, 0xe6, 0x24, 0x5e, 0xa2, 0x2e, 0x2c, 0x3c, 0xc2, 0xa1, + 0xe7, 0x7a, 0xfd, 0xc8, 0x04, 0x9e, 0xd1, 0xeb, 0xe7, 0xb1, 0x85, 0x92, 0x3e, 0x25, 0x10, 0xa9, + 0x5d, 0xeb, 0x8f, 0x06, 0x7c, 0x8e, 0x25, 0xc6, 0x1e, 0xf3, 0x27, 0x52, 0xa4, 0x3c, 0xc4, 0xf4, + 0xf0, 0xc8, 0x34, 0xd8, 0x34, 0x8e, 0x78, 0x50, 0xeb, 0x67, 0xf1, 0xdf, 0xaa, 0x9f, 0xa5, 0xf9, + 0xeb, 0x67, 0xa2, 0xdf, 0x72, 0xae, 0x7e, 0x2b, 0x17, 0xe9, 0xb7, 0xf5, 0xb3, 0x92, 0xa8, 
0x55, + 0xc9, 0xfa, 0xe6, 0x90, 0xd2, 0x9d, 0x54, 0x4a, 0x25, 0xee, 0x6d, 0x9a, 0xa1, 0x62, 0xae, 0xbb, + 0x3d, 0xe2, 0x51, 0xf7, 0x81, 0x4b, 0xc2, 0x97, 0x08, 0x4a, 0xc9, 0xd2, 0x92, 0x9e, 0xa5, 0x6a, + 0x8a, 0x95, 0x2f, 0x45, 0x8a, 0xe9, 0xba, 0xaa, 0xbc, 0x82, 0xae, 0x5a, 0x7f, 0x2f, 0xc2, 0x3a, + 0x8b, 0xc8, 0x3d, 0x7c, 0x40, 0x06, 0xdf, 0xc3, 0xc3, 0x39, 0xa3, 0xf2, 0x96, 0x12, 0x95, 0xba, + 0x8d, 0xfe, 0xcf, 0xfa, 0x6c, 0xac, 0xff, 0xdc, 0x80, 0x85, 0xe4, 0x03, 0x80, 0xda, 0x00, 0x02, + 0xc6, 0x6b, 0xbc, 0xe0, 0x7a, 0x85, 0x81, 0xc3, 0xb4, 0xd7, 0x51, 0x2c, 0xd0, 0x8f, 0xa1, 0x2a, + 0x9e, 0xa4, 0x16, 0xae, 0x2a, 0x5a, 0xa0, 0x21, 0xc1, 0xc3, 0x5b, 0x3d, 0x1c, 0x50, 0x12, 0xda, + 0xef, 0x33, 0x2f, 0xc6, 0xb1, 0xf5, 0xf6, 0x45, 0x2c, 0x25, 0xfb, 0x4f, 0x89, 0x63, 0xf1, 0x15, + 0xef, 0x74, 0xe4, 0x1b, 0x5a, 0x9f, 0x18, 0xb0, 0xca, 0x1c, 0x65, 0xd4, 0xa4, 0x89, 0xb1, 0x03, + 0x0b, 0xa1, 0x6c, 0x73, 0x77, 0x17, 0xbb, 0xad, 0xb6, 0x4e, 0x6b, 0x0e, 0x95, 0x76, 0xf9, 0x49, + 0x6c, 0x19, 0x4e, 0x8a, 0x44, 0x5b, 0x1a, 0x8d, 0xc5, 0x3c, 0x1a, 0x19, 0xa4, 0xa0, 0x11, 0xf7, + 0xdb, 0x22, 0xa0, 0xbb, 0x6c, 0xff, 0xce, 0xf2, 0x6f, 0x92, 0xaa, 0x8f, 0x33, 0x1e, 0xbd, 0x31, + 0x21, 0x25, 0x6b, 0x6f, 0xdf, 0x1c, 0xc7, 0xd6, 0xf6, 0x4b, 0x72, 0xe7, 0x5f, 0xe0, 0x95, 0x55, + 0xa8, 0xe9, 0x5b, 0xbc, 0x0c, 0xe9, 0xdb, 0xfa, 0x75, 0x11, 0x56, 0x7e, 0xe0, 0x0f, 0x46, 0x43, + 0x92, 0xd2, 0x17, 0x64, 0xe8, 0x33, 0x27, 0xf4, 0xe9, 0xb6, 0xf6, 0xf6, 0x38, 0xb6, 0xae, 0xcf, + 0x4a, 0x9d, 0x8e, 0xbd, 0xd4, 0xb4, 0xfd, 0xb5, 0x08, 0x6b, 0xfb, 0x7e, 0xf0, 0xdd, 0x3d, 0x7e, + 0xc6, 0x53, 0xca, 0xe4, 0x51, 0x86, 0xbc, 0xb5, 0x09, 0x79, 0x0c, 0xf1, 0x21, 0xa6, 0xa1, 0xfb, + 0xd8, 0xbe, 0x3e, 0x8e, 0xad, 0xee, 0xac, 0xc4, 0x4d, 0x70, 0x97, 0x99, 0x34, 0x6d, 0x0f, 0x54, + 0x9a, 0x71, 0x0f, 0xf4, 0x8f, 0x22, 0xac, 0x7f, 0x34, 0xc2, 0x1e, 0x75, 0x07, 0x44, 0x90, 0x9d, + 0x52, 0xfd, 0x93, 0x0c, 0xd5, 0x8d, 0x09, 0xd5, 0x3a, 0x46, 0x92, 0xfe, 0xc1, 0x38, 0xb6, 0x6e, + 0xcc, 0x4a, 0x7a, 0xde, 0x0c, 0xff, 0x73, 0xf4, 0xff, 0xa6, 0x08, 0x2b, 0x7b, 0x62, 0xd7, 0x96, + 0x2c, 0xfc, 0x24, 0x87, 0x76, 0xf5, 0x12, 0x25, 0x38, 0x68, 0xeb, 0x88, 0xf9, 0x8a, 0x84, 0x8e, + 0xbd, 0xd4, 0x45, 0xe2, 0x0f, 0x45, 0x58, 0xdf, 0x21, 0x94, 0x1c, 0x52, 0xd2, 0xbb, 0xe3, 0x92, + 0x81, 0x42, 0xe2, 0xc7, 0x46, 0x86, 0xc5, 0xa6, 0x72, 0xcc, 0xca, 0x05, 0xd9, 0xf6, 0x38, 0xb6, + 0x6e, 0xce, 0xca, 0x63, 0xfe, 0x1c, 0x97, 0x9a, 0xcf, 0xdf, 0x15, 0xe1, 0xf3, 0xe2, 0x88, 0x2e, + 0x6e, 0xdd, 0x26, 0x74, 0xfe, 0x34, 0xc3, 0xa6, 0xa5, 0x96, 0x82, 0x1c, 0x88, 0x7d, 0x6b, 0x1c, + 0x5b, 0xdf, 0x9a, 0xbd, 0x16, 0xe4, 0x4c, 0xf1, 0x5f, 0x93, 0x9b, 0x7c, 0xb7, 0x3f, 0x6f, 0x6e, + 0xea, 0xa0, 0x57, 0xcb, 0x4d, 0x7d, 0x8e, 0x4b, 0xcd, 0xe7, 0x5f, 0xaa, 0xb0, 0xcc, 0xb3, 0x24, + 0xa5, 0xf1, 0x2b, 0x20, 0x8f, 0x47, 0x92, 0x43, 0x94, 0x1c, 0xa9, 0xc3, 0xe0, 0xb0, 0xbd, 0x27, + 0x0f, 0x4e, 0xc2, 0x02, 0xbd, 0x07, 0xd5, 0x88, 0x1f, 0x5c, 0xe5, 0xce, 0xb7, 0x31, 0x7d, 0x37, + 0xa4, 0x1f, 0x91, 0x77, 0x0b, 0x8e, 0xb4, 0x47, 0x37, 0xa0, 0x3a, 0xe0, 0x2c, 0xca, 0x83, 0x7b, + 0x6b, 0x1a, 0x99, 0x3d, 0xca, 0x31, 0xb4, 0xc0, 0xa0, 0xeb, 0x50, 0xe1, 0x5b, 0x6c, 0x79, 0xe7, + 0xaa, 0xbd, 0x36, 0xbb, 0xd1, 0xdd, 0x2d, 0x38, 0xc2, 0x1c, 0x75, 0xa1, 0x1c, 0x84, 0xfe, 0x50, + 0x1e, 0x77, 0xde, 0x98, 0x7e, 0xa7, 0x7a, 0x3e, 0xd8, 0x2d, 0x38, 0xdc, 0x16, 0xbd, 0x03, 0xb5, + 0x88, 0x1f, 0x2c, 0x22, 0x7e, 0x51, 0xc4, 0x76, 0x95, 0x53, 0x30, 0x05, 0x92, 0x98, 0xa2, 0x77, + 0xa0, 0x7a, 0xc2, 0xb7, 0x8d, 0xf2, 0x96, 0x6f, 0x43, 0x05, 0xe9, 0x1b, 0x4a, 0xb6, 0x2e, 0x61, + 0x8b, 0xee, 0xc0, 
0x12, 0xf5, 0x83, 0xe3, 0x64, 0x77, 0x26, 0x2f, 0x99, 0x9a, 0x2a, 0x36, 0x6f, + 0xf7, 0xb6, 0x5b, 0x70, 0x34, 0x1c, 0xba, 0x0f, 0xab, 0x0f, 0xb5, 0x6d, 0x00, 0x89, 0xf8, 0xcd, + 0xf5, 0x14, 0xcf, 0xf9, 0x1b, 0x94, 0xdd, 0x82, 0x93, 0x41, 0xa3, 0x1d, 0x58, 0x89, 0xb4, 0x2f, + 0x9c, 0xbc, 0x0a, 0xd6, 0xd6, 0xa5, 0x7f, 0x03, 0x77, 0x0b, 0xce, 0x14, 0x06, 0xdd, 0x83, 0x95, + 0x9e, 0x56, 0xdf, 0xcd, 0xc5, 0xac, 0x57, 0xf9, 0x5f, 0x00, 0x36, 0x9b, 0x8e, 0x45, 0xdf, 0x87, + 0xd5, 0x60, 0xaa, 0xb6, 0x99, 0x4b, 0x7c, 0xbe, 0x2f, 0xea, 0xab, 0xcc, 0x29, 0x82, 0x6c, 0x91, + 0xd3, 0x60, 0xd5, 0x3d, 0x21, 0x71, 0x73, 0xf9, 0x62, 0xf7, 0xf4, 0x22, 0xa0, 0xba, 0x27, 0x46, + 0x6c, 0x98, 0x94, 0xa3, 0xd6, 0x27, 0x55, 0x58, 0x92, 0x32, 0x13, 0xb7, 0x61, 0xdf, 0x48, 0x95, + 0x23, 0x54, 0xf6, 0xe6, 0x45, 0xca, 0xe1, 0xe6, 0x8a, 0x70, 0xbe, 0x9e, 0x0a, 0x47, 0x48, 0x6e, + 0x7d, 0x52, 0xe2, 0xf8, 0x7b, 0x15, 0x84, 0x14, 0xcb, 0x56, 0x22, 0x16, 0xa1, 0xb4, 0xd7, 0xf3, + 0xcf, 0x94, 0x09, 0x4a, 0x2a, 0x65, 0x1b, 0x6a, 0xae, 0xb8, 0x8a, 0xcf, 0xd3, 0x58, 0xf6, 0xa6, + 0x9e, 0xe5, 0xbe, 0x04, 0xa0, 0xad, 0x89, 0x62, 0x84, 0xd0, 0xae, 0x66, 0x15, 0x93, 0x82, 0x12, + 0xc1, 0x5c, 0x4b, 0x05, 0x53, 0x95, 0x98, 0xcc, 0xf9, 0x2b, 0x5d, 0x98, 0x54, 0xcb, 0x6d, 0x58, + 0x4e, 0xf2, 0x8b, 0x0f, 0x49, 0xb9, 0xbc, 0x79, 0xd1, 0xb6, 0x2e, 0xc1, 0xeb, 0x28, 0x74, 0x37, + 0x93, 0x94, 0xf5, 0xe9, 0x4f, 0xf1, 0x74, 0x4a, 0x26, 0x33, 0x4d, 0x67, 0xe4, 0x77, 0xe0, 0xb5, + 0x49, 0x52, 0x09, 0x9f, 0x20, 0xbb, 0xc3, 0xd7, 0xd2, 0x31, 0x99, 0x6a, 0x1a, 0xa8, 0xba, 0x25, + 0x93, 0x71, 0xf1, 0x22, 0xb7, 0x92, 0x54, 0xcc, 0xb8, 0x25, 0x06, 0xd0, 0x2e, 0x2c, 0x0c, 0x09, + 0xc5, 0x3d, 0x4c, 0xb1, 0x59, 0xe3, 0x9f, 0xa5, 0xb7, 0x32, 0x02, 0x91, 0xe8, 0xf6, 0x87, 0xd2, + 0xf0, 0xb6, 0x47, 0xc3, 0x53, 0x79, 0x77, 0x91, 0xa2, 0x37, 0xbe, 0x09, 0xcb, 0x9a, 0x01, 0x5a, + 0x85, 0xd2, 0x31, 0x49, 0xfe, 0x3d, 0xc3, 0x9a, 0x68, 0x0d, 0x2a, 0x27, 0x78, 0x30, 0x22, 0x3c, + 0x3f, 0xeb, 0x8e, 0x78, 0xd8, 0x2e, 0xbe, 0x67, 0xd8, 0x75, 0xa8, 0x85, 0xe2, 0x2d, 0x76, 0xff, + 0xe9, 0xb3, 0x46, 0xe1, 0xb3, 0x67, 0x8d, 0xc2, 0x8b, 0x67, 0x0d, 0xe3, 0xe3, 0xb3, 0x86, 0xf1, + 0x8b, 0xb3, 0x86, 0xf1, 0xe4, 0xac, 0x61, 0x3c, 0x3d, 0x6b, 0x18, 0x7f, 0x3e, 0x6b, 0x18, 0x7f, + 0x3b, 0x6b, 0x14, 0x5e, 0x9c, 0x35, 0x8c, 0x4f, 0x9f, 0x37, 0x0a, 0x4f, 0x9f, 0x37, 0x0a, 0x9f, + 0x3d, 0x6f, 0x14, 0x7e, 0x78, 0x6d, 0xee, 0x2f, 0xe4, 0x41, 0x95, 0x33, 0xb5, 0xf5, 0xcf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x96, 0xad, 0x71, 0x3d, 0xdd, 0x1d, 0x00, 0x00, } func (this *LokiRequest) Equal(that interface{}) bool { @@ -2125,6 +2194,42 @@ func (this *DetectedFieldsResponse) Equal(that interface{}) bool { } return true } +func (this *QueryPatternsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryPatternsResponse) + if !ok { + that2, ok := that.(QueryPatternsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Response == nil { + if this.Response != nil { + return false + } + } else if !this.Response.Equal(*that1.Response) { + return false + } + if len(this.Headers) != len(that1.Headers) { + return false + } + for i := range this.Headers { + if !this.Headers[i].Equal(that1.Headers[i]) { + return false + } + } + return true +} func (this *DetectedLabelsResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2434,6 +2539,30 @@ func (this *QueryResponse_DetectedFields) Equal(that interface{}) 
bool { } return true } +func (this *QueryResponse_PatternsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResponse_PatternsResponse) + if !ok { + that2, ok := that.(QueryResponse_PatternsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.PatternsResponse.Equal(that1.PatternsResponse) { + return false + } + return true +} func (this *QueryResponse_DetectedLabels) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2688,6 +2817,30 @@ func (this *QueryRequest_DetectedFields) Equal(that interface{}) bool { } return true } +func (this *QueryRequest_PatternsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryRequest_PatternsRequest) + if !ok { + that2, ok := that.(QueryRequest_PatternsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.PatternsRequest.Equal(that1.PatternsRequest) { + return false + } + return true +} func (this *QueryRequest_DetectedLabels) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2916,6 +3069,17 @@ func (this *DetectedFieldsResponse) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *QueryPatternsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&queryrange.QueryPatternsResponse{") + s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") + s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *DetectedLabelsResponse) GoString() string { if this == nil { return "nil" @@ -2931,7 +3095,7 @@ func (this *QueryResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 16) + s := make([]string, 0, 17) s = append(s, "&queryrange.QueryResponse{") if this.Status != nil { s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") @@ -3022,6 +3186,14 @@ func (this *QueryResponse_DetectedFields) GoString() string { `DetectedFields:` + fmt.Sprintf("%#v", this.DetectedFields) + `}`}, ", ") return s } +func (this *QueryResponse_PatternsResponse) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&queryrange.QueryResponse_PatternsResponse{` + + `PatternsResponse:` + fmt.Sprintf("%#v", this.PatternsResponse) + `}`}, ", ") + return s +} func (this *QueryResponse_DetectedLabels) GoString() string { if this == nil { return "nil" @@ -3034,7 +3206,7 @@ func (this *QueryRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 14) + s := make([]string, 0, 15) s = append(s, "&queryrange.QueryRequest{") if this.Request != nil { s = append(s, "Request: "+fmt.Sprintf("%#v", this.Request)+",\n") @@ -3119,6 +3291,14 @@ func (this *QueryRequest_DetectedFields) GoString() string { `DetectedFields:` + fmt.Sprintf("%#v", this.DetectedFields) + `}`}, ", ") return s } +func (this *QueryRequest_PatternsRequest) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&queryrange.QueryRequest_PatternsRequest{` + + `PatternsRequest:` + fmt.Sprintf("%#v", this.PatternsRequest) + `}`}, ", ") + return s +} func (this *QueryRequest_DetectedLabels) GoString() string { if this == nil { return "nil" @@ -4043,6 
+4223,55 @@ func (m *DetectedFieldsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *QueryPatternsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPatternsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPatternsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Headers[iNdEx].Size() + i -= size + if _, err := m.Headers[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Response != nil { + { + size := m.Response.Size() + i -= size + if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *DetectedLabelsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4336,6 +4565,26 @@ func (m *QueryResponse_DetectedFields) MarshalToSizedBuffer(dAtA []byte) (int, e } return len(dAtA) - i, nil } +func (m *QueryResponse_PatternsResponse) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *QueryResponse_PatternsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PatternsResponse != nil { + { + size, err := m.PatternsResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} func (m *QueryResponse_DetectedLabels) MarshalTo(dAtA []byte) (int, error) { return m.MarshalToSizedBuffer(dAtA[:m.Size()]) } @@ -4352,7 +4601,7 @@ func (m *QueryResponse_DetectedLabels) MarshalToSizedBuffer(dAtA []byte) (int, e i = encodeVarintQueryrange(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x62 + dAtA[i] = 0x6a } return len(dAtA) - i, nil } @@ -4567,6 +4816,26 @@ func (m *QueryRequest_DetectedFields) MarshalToSizedBuffer(dAtA []byte) (int, er } return len(dAtA) - i, nil } +func (m *QueryRequest_PatternsRequest) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *QueryRequest_PatternsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PatternsRequest != nil { + { + size, err := m.PatternsRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} func (m *QueryRequest_DetectedLabels) MarshalTo(dAtA []byte) (int, error) { return m.MarshalToSizedBuffer(dAtA[:m.Size()]) } @@ -4583,7 +4852,7 @@ func (m *QueryRequest_DetectedLabels) MarshalToSizedBuffer(dAtA []byte) (int, er i = encodeVarintQueryrange(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x52 + dAtA[i] = 0x5a } return len(dAtA) - i, nil } @@ -4984,6 +5253,25 @@ func (m *DetectedFieldsResponse) Size() (n int) { return n } +func (m *QueryPatternsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovQueryrange(uint64(l)) + 
} + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.Size() + n += 1 + l + sovQueryrange(uint64(l)) + } + } + return n +} + func (m *DetectedLabelsResponse) Size() (n int) { if m == nil { return 0 @@ -5139,6 +5427,18 @@ func (m *QueryResponse_DetectedFields) Size() (n int) { } return n } +func (m *QueryResponse_PatternsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PatternsResponse != nil { + l = m.PatternsResponse.Size() + n += 1 + l + sovQueryrange(uint64(l)) + } + return n +} func (m *QueryResponse_DetectedLabels) Size() (n int) { if m == nil { return 0 @@ -5267,6 +5567,18 @@ func (m *QueryRequest_DetectedFields) Size() (n int) { } return n } +func (m *QueryRequest_PatternsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PatternsRequest != nil { + l = m.PatternsRequest.Size() + n += 1 + l + sovQueryrange(uint64(l)) + } + return n +} func (m *QueryRequest_DetectedLabels) Size() (n int) { if m == nil { return 0 @@ -5487,6 +5799,17 @@ func (this *DetectedFieldsResponse) String() string { }, "") return s } +func (this *QueryPatternsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryPatternsResponse{`, + `Response:` + fmt.Sprintf("%v", this.Response) + `,`, + `Headers:` + fmt.Sprintf("%v", this.Headers) + `,`, + `}`, + }, "") + return s +} func (this *DetectedLabelsResponse) String() string { if this == nil { return "nil" @@ -5609,6 +5932,16 @@ func (this *QueryResponse_DetectedFields) String() string { }, "") return s } +func (this *QueryResponse_PatternsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryResponse_PatternsResponse{`, + `PatternsResponse:` + strings.Replace(fmt.Sprintf("%v", this.PatternsResponse), "QueryPatternsResponse", "QueryPatternsResponse", 1) + `,`, + `}`, + }, "") + return s +} func (this *QueryResponse_DetectedLabels) String() string { if this == nil { return "nil" @@ -5720,6 +6053,16 @@ func (this *QueryRequest_DetectedFields) String() string { }, "") return s } +func (this *QueryRequest_PatternsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryRequest_PatternsRequest{`, + `PatternsRequest:` + strings.Replace(fmt.Sprintf("%v", this.PatternsRequest), "QueryPatternsRequest", "logproto.QueryPatternsRequest", 1) + `,`, + `}`, + }, "") + return s +} func (this *QueryRequest_DetectedLabels) String() string { if this == nil { return "nil" @@ -8412,6 +8755,129 @@ func (m *DetectedFieldsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *QueryPatternsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPatternsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPatternsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &github_com_grafana_loki_v3_pkg_logproto.QueryPatternsResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Headers = append(m.Headers, github_com_grafana_loki_v3_pkg_querier_queryrange_queryrangebase_definitions.PrometheusResponseHeader{}) + if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQueryrange(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQueryrange + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQueryrange + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DetectedLabelsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8951,6 +9417,41 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error { m.Response = &QueryResponse_DetectedFields{v} iNdEx = postIndex case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternsResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &QueryPatternsResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &QueryResponse_PatternsResponse{v} + iNdEx = postIndex + case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DetectedLabels", wireType) } @@ -9446,6 +9947,41 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { m.Request = &QueryRequest_DetectedFields{v} iNdEx = postIndex case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternsRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &logproto.QueryPatternsRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &QueryRequest_PatternsRequest{v} + iNdEx = postIndex + case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DetectedLabels", wireType) } diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index 211699e836edb..91d2488426246 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -7,6 +7,7 @@ import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "pkg/logproto/indexgateway.proto"; import "pkg/logproto/logproto.proto"; +import "pkg/logproto/pattern.proto"; import "pkg/logproto/sketch.proto"; import "pkg/logqlmodel/stats/stats.proto"; import "pkg/push/push.proto"; @@ -186,6 +187,13 @@ message DetectedFieldsResponse { ]; } +message QueryPatternsResponse { + logproto.QueryPatternsResponse response = 1 [(gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/logproto.QueryPatternsResponse"]; + repeated definitions.PrometheusResponseHeader Headers = 2 [ + (gogoproto.jsontag) = "-", + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions.PrometheusResponseHeader" + ]; +} message DetectedLabelsResponse { logproto.DetectedLabelsResponse response = 1 [(gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/logproto.DetectedLabelsResponse"]; repeated definitions.PrometheusResponseHeader Headers = 2 [ @@ -207,7 +215,8 @@ message QueryResponse { QuantileSketchResponse quantileSketches = 9; ShardsResponse shardsResponse = 10; DetectedFieldsResponse detectedFields = 11; - DetectedLabelsResponse detectedLabels = 12; + QueryPatternsResponse patternsResponse = 12; + DetectedLabelsResponse detectedLabels = 13; } } @@ -221,7 +230,8 @@ message QueryRequest { logproto.VolumeRequest volume = 6; indexgatewaypb.ShardsRequest shardsRequest = 8; logproto.DetectedFieldsRequest detectedFields = 9; - logproto.DetectedLabelsRequest detectedLabels = 10; + logproto.QueryPatternsRequest patternsRequest = 10; + logproto.DetectedLabelsRequest detectedLabels = 11; } map<string, string> metadata = 7 [(gogoproto.nullable) = false]; } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 3118ceb13136a..012d7778d42a8 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -245,7 +245,7 @@ func NewMiddleware( instantRT = instantMetricTripperware.Wrap(next) statsRT = indexStatsTripperware.Wrap(next) seriesVolumeRT = seriesVolumeTripperware.Wrap(next) - detectedFieldsRT = next //TODO(twhitney): add middlewares for detected fields + detectedFieldsRT = next // TODO(twhitney): add middlewares for detected fields detectedLabelsRT = next // TODO(shantanu): add middlewares ) @@ -415,6 +415,7 @@ const ( VolumeRangeOp = "volume_range" IndexShardsOp = "index_shards" DetectedFieldsOp = "detected_fields" + PatternsQueryOp = "patterns" DetectedLabelsOp = "detected_labels" ) @@ -438,6 +439,8 @@ func getOperation(path string) string { return IndexShardsOp case path == "/loki/api/v1/detected_fields": return DetectedFieldsOp + case path == "/loki/api/v1/patterns": + return PatternsQueryOp case path == "/loki/api/v1/detected_labels": return DetectedLabelsOp default: @@ -943,7 +946,6 @@ func NewVolumeTripperware(cfg Config, log log.Logger, limits Limits, schema conf schema,
metricsNamespace, ) - if err != nil { return nil, err } diff --git a/pkg/querier/queryrange/stats.go b/pkg/querier/queryrange/stats.go index 4e5b646e7429b..384ee7ceed53c 100644 --- a/pkg/querier/queryrange/stats.go +++ b/pkg/querier/queryrange/stats.go @@ -37,6 +37,7 @@ const ( queryTypeVolume = "volume" queryTypeShards = "shards" queryTypeDetectedFields = "detected_fields" + queryTypeQueryPatterns = "patterns" queryTypeDetectedLabels = "detected_labels" ) @@ -174,6 +175,10 @@ func StatsCollectorMiddleware() queryrangebase.Middleware { responseStats = &stats.Result{} // TODO: support stats in detected fields totalEntries = 1 queryType = queryTypeDetectedFields + case *QueryPatternsResponse: + responseStats = &stats.Result{} // TODO: support stats in query patterns + totalEntries = len(r.Response.Series) + queryType = queryTypeQueryPatterns default: level.Warn(logger).Log("msg", fmt.Sprintf("cannot compute stats, unexpected type: %T", resp)) } diff --git a/pkg/util/marshal/marshal.go b/pkg/util/marshal/marshal.go index dc99635cb0950..1a4d6701b1b18 100644 --- a/pkg/util/marshal/marshal.go +++ b/pkg/util/marshal/marshal.go @@ -44,6 +44,8 @@ func WriteResponseJSON(r *http.Request, v any, w http.ResponseWriter) error { return WriteIndexStatsResponseJSON(result, w) case *logproto.VolumeResponse: return WriteVolumeResponseJSON(result, w) + case *logproto.QueryPatternsResponse: + return WriteQueryPatternsResponseJSON(result, w) } return fmt.Errorf("unknown response type %T", v) } @@ -185,6 +187,49 @@ func WriteDetectedFieldsResponseJSON(r *logproto.DetectedFieldsResponse, w io.Wr return s.Flush() } +// WriteQueryPatternsResponseJSON marshals a logproto.QueryPatternsResponse to JSON and then +// writes it to the provided io.Writer. +func WriteQueryPatternsResponseJSON(r *logproto.QueryPatternsResponse, w io.Writer) error { + s := jsoniter.ConfigFastest.BorrowStream(w) + defer jsoniter.ConfigFastest.ReturnStream(s) + s.WriteObjectStart() + s.WriteObjectField("status") + s.WriteString("success") + + s.WriteMore() + s.WriteObjectField("data") + s.WriteArrayStart() + if len(r.Series) > 0 { + for i, series := range r.Series { + s.WriteObjectStart() + s.WriteObjectField("pattern") + s.WriteStringWithHTMLEscaped(series.Pattern) + s.WriteMore() + s.WriteObjectField("samples") + s.WriteArrayStart() + for j, sample := range series.Samples { + s.WriteArrayStart() + s.WriteInt64(sample.Timestamp.Unix()) + s.WriteMore() + s.WriteInt64(sample.Value) + s.WriteArrayEnd() + if j < len(series.Samples)-1 { + s.WriteMore() + } + } + s.WriteArrayEnd() + s.WriteObjectEnd() + if i < len(r.Series)-1 { + s.WriteMore() + } + } + } + s.WriteArrayEnd() + s.WriteObjectEnd() + s.WriteRaw("\n") + return s.Flush() +} + // WriteDetectedLabelsResponseJSON marshals a logproto.DetectedLabelsResponse to JSON and then // writes it to the provided io.Writer. 
func WriteDetectedLabelsResponseJSON(r *logproto.DetectedLabelsResponse, w io.Writer) error { diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go index 380768d9cb06d..1ccbe2158dfc5 100644 --- a/pkg/util/marshal/marshal_test.go +++ b/pkg/util/marshal/marshal_test.go @@ -10,6 +10,7 @@ import ( "time" json "github.com/json-iterator/go" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -1062,3 +1063,74 @@ func Test_WriteTailResponseJSON(t *testing.T) { ), ) } + +func Test_WriteQueryPatternsResponseJSON(t *testing.T) { + for i, tc := range []struct { + input *logproto.QueryPatternsResponse + expected string + }{ + { + &logproto.QueryPatternsResponse{}, + `{"status":"success","data":[]}`, + }, + { + &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{ + { + Pattern: "foo <*> bar", + Samples: []*logproto.PatternSample{ + {Timestamp: model.TimeFromUnix(1), Value: 1}, + {Timestamp: model.TimeFromUnix(2), Value: 2}, + }, + }, + }, + }, + `{"status":"success","data":[{"pattern":"foo <*> bar","samples":[[1,1],[2,2]]}]}`, + }, + { + &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{ + { + Pattern: "foo <*> bar", + Samples: []*logproto.PatternSample{ + {Timestamp: model.TimeFromUnix(1), Value: 1}, + {Timestamp: model.TimeFromUnix(2), Value: 2}, + }, + }, + { + Pattern: "foo <*> buzz", + Samples: []*logproto.PatternSample{ + {Timestamp: model.TimeFromUnix(3), Value: 1}, + {Timestamp: model.TimeFromUnix(3), Value: 2}, + }, + }, + }, + }, + `{"status":"success","data":[{"pattern":"foo <*> bar","samples":[[1,1],[2,2]]},{"pattern":"foo <*> buzz","samples":[[3,1],[3,2]]}]}`, + }, + { + &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{ + { + Pattern: "foo <*> bar", + Samples: []*logproto.PatternSample{}, + }, + { + Pattern: "foo <*> buzz", + Samples: []*logproto.PatternSample{}, + }, + }, + }, + `{"status":"success","data":[{"pattern":"foo <*> bar","samples":[]},{"pattern":"foo <*> buzz","samples":[]}]}`, + }, + } { + tc := tc + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + var b bytes.Buffer + err := WriteQueryPatternsResponseJSON(tc.input, &b) + require.NoError(t, err) + got := b.String() + require.JSONEqf(t, tc.expected, got, "Patterns Test %d failed", i) + }) + } +} diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index 20cd6ad1fe419..bd7a52e53d8ea 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -256,7 +256,34 @@ interface_names: ['eth0'], }, }, + pattern_ingester: { + enabled: $._config.pattern_ingester.enabled, + lifecycler: { + ring: { + heartbeat_timeout: '1m', + replication_factor: 1, + kvstore: if $._config.memberlist_ring_enabled then {} else { + store: 'consul', + consul: { + host: 'consul.%s.svc.cluster.local:8500' % $._config.namespace, + http_client_timeout: '20s', + consistent_reads: true, + }, + }, + }, + num_tokens: 512, + heartbeat_period: '5s', + join_after: '30s', + interface_names: ['eth0'], + }, + client_config: { + grpc_client_config: { + max_recv_msg_size: 1024 * 1024 * 64, + }, + remote_timeout: '1s', + }, + }, ingester_client: { grpc_client_config: { max_recv_msg_size: 1024 * 1024 * 64, diff --git a/production/ksonnet/loki/images.libsonnet b/production/ksonnet/loki/images.libsonnet index 0dc2bbe105ce9..0d56e4349120f 100644 --- 
a/production/ksonnet/loki/images.libsonnet +++ b/production/ksonnet/loki/images.libsonnet @@ -8,6 +8,7 @@ distributor:: self.loki, ingester:: self.loki, + pattern_ingester:: self.loki, querier:: self.loki, tableManager:: self.loki, query_frontend:: self.loki, diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet index ad0489a69cd3f..1a846d74384b4 100644 --- a/production/ksonnet/loki/loki.libsonnet +++ b/production/ksonnet/loki/loki.libsonnet @@ -35,4 +35,7 @@ (import 'memberlist.libsonnet') + // Prometheus ServiceMonitor -(import 'servicemonitor.libsonnet') +(import 'servicemonitor.libsonnet') + + +// Patterns ingester +(import 'patterns.libsonnet') diff --git a/production/ksonnet/loki/patterns.libsonnet b/production/ksonnet/loki/patterns.libsonnet new file mode 100644 index 0000000000000..06f181cb5d078 --- /dev/null +++ b/production/ksonnet/loki/patterns.libsonnet @@ -0,0 +1,56 @@ +local k = import 'ksonnet-util/kausal.libsonnet'; + +{ + local container = k.core.v1.container, + local podDisruptionBudget = k.policy.v1.podDisruptionBudget, + + _config+:: { + pattern_ingester: { + // globally enable or disable the use of the pattern ingester + enabled: false, + replicas: 3, + allow_multiple_replicas_on_same_node: false, + }, + }, + + pattern_ingester_args:: $._config.commonArgs { + target: 'pattern-ingester', + }, + + pattern_ingester_ports:: $.util.defaultPorts, + + + pattern_ingester_container:: + container.new('pattern-ingester', $._images.pattern_ingester) + + container.withPorts($.pattern_ingester_ports) + + container.withArgsMixin(k.util.mapToFlags($.pattern_ingester_args)) + + container.mixin.readinessProbe.httpGet.withPath('/ready') + + container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port) + + container.mixin.readinessProbe.withInitialDelaySeconds(15) + + container.mixin.readinessProbe.withTimeoutSeconds(1) + + k.util.resourcesRequests('1', '7Gi') + + k.util.resourcesLimits('2', '14Gi') + + container.withEnvMixin($._config.commonEnvs), + + + pattern_ingester_statefulset: + if $._config.pattern_ingester.enabled then ( + $.newLokiStatefulSet('pattern-ingester', $._config.pattern_ingester.replicas, $.pattern_ingester_container, []) + + $.util.podPriority('high') + + (if !$._config.pattern_ingester.allow_multiple_replicas_on_same_node then $.util.antiAffinity else {}) + ) else {}, + + pattern_ingester_service: + if $._config.pattern_ingester.enabled then ( + k.util.serviceFor($.pattern_ingester_statefulset, $._config.service_ignored_labels) + ) else {}, + + pattern_ingester_pdb: + if $._config.pattern_ingester.enabled then ( + podDisruptionBudget.new('loki-pattern-ingester-pdb') + + podDisruptionBudget.mixin.metadata.withLabels({ name: 'loki-pattern-ingester-pdb' }) + + podDisruptionBudget.mixin.spec.selector.withMatchLabels({ name: 'pattern-ingester' }) + + podDisruptionBudget.mixin.spec.withMaxUnavailable(1) + ) else {}, + +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE new file mode 100644 index 0000000000000..0e5d580e0e964 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE @@ -0,0 +1,364 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2.
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 0000000000000..5cd74a0343317 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). 
+func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). +func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 0000000000000..c4764e6b2f088 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go new file mode 100644 index 0000000000000..f69792388c1a5 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package simplelru + +import ( + "errors" + + "github.com/hashicorp/golang-lru/v2/internal" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback[K comparable, V any] func(key K, value V) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU[K comparable, V any] struct { + size int + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] +} + +// NewLRU constructs an LRU of the given size +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { + if size <= 0 { + return nil, errors.New("must provide a positive size") + } + + c := &LRU[K, V]{ + size: size, + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU[K, V]) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value = value + return false + } + + // Add new item + ent := c.evictList.PushFront(key, value) + c.items[key] = ent + + evict := c.evictList.Length() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU[K, V]) Contains(key K) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] + if ent, ok = c.items[key]; ok { + return ent.Value, true + } + return +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU[K, V]) Remove(key K) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + return ent.Key, ent.Value, true + } + return +} + +// GetOldest returns the oldest entry +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true + } + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU[K, V]) Keys() []K { + keys := make([]K, c.evictList.Length()) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key + i++ + } + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *LRU[K, V]) Values() []V { + values := make([]V, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value + i++ + } + return values +} + +// Len returns the number of items in the cache. +func (c *LRU[K, V]) Len() int { + return c.evictList.Length() +} + +// Resize changes the cache size. +func (c *LRU[K, V]) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU[K, V]) removeOldest() { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) { + c.evictList.Remove(e) + delete(c.items, e.Key) + if c.onEvict != nil { + c.onEvict(e.Key, e.Value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go new file mode 100644 index 0000000000000..043b8bcc3f3f5 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package simplelru provides a simple LRU implementation based on the built-in container/list. +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache[K comparable, V any] interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key K, value V) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key K) (value V, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key K) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key K) (value V, ok bool) + + // Removes a key from the cache. + Remove(key K) bool + + // Removes the oldest entry from cache. + RemoveOldest() (K, V, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (K, V, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []K + + // Values returns a slice of the values in the cache, from oldest to newest. + Values() []V + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries.
+ Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0eefeb19741bd..f8bd24ec449a5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1004,6 +1004,10 @@ github.com/hashicorp/go-version ## explicit; go 1.12 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru +# github.com/hashicorp/golang-lru/v2 v2.0.7 +## explicit; go 1.18 +github.com/hashicorp/golang-lru/v2/internal +github.com/hashicorp/golang-lru/v2/simplelru # github.com/hashicorp/memberlist v0.5.0 => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe ## explicit; go 1.12 github.com/hashicorp/memberlist
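
A minimal usage sketch for the new patterns JSON writer added in pkg/util/marshal above. The output matches the expected strings in Test_WriteQueryPatternsResponseJSON; the marshal and logproto import paths are inferred from the file locations and the github.com/grafana/loki/v3 module path seen in this diff, so treat them as assumptions rather than published API:

package main

import (
	"os"

	"github.com/prometheus/common/model"

	"github.com/grafana/loki/v3/pkg/logproto"
	"github.com/grafana/loki/v3/pkg/util/marshal"
)

func main() {
	// Build a response with one pattern series, mirroring the test fixtures above.
	resp := &logproto.QueryPatternsResponse{
		Series: []*logproto.PatternSeries{{
			Pattern: "foo <*> bar",
			Samples: []*logproto.PatternSample{
				{Timestamp: model.TimeFromUnix(1), Value: 1},
				{Timestamp: model.TimeFromUnix(2), Value: 2},
			},
		}},
	}
	// Each sample is written as a [unixSeconds, count] pair, so this emits:
	// {"status":"success","data":[{"pattern":"foo <*> bar","samples":[[1,1],[2,2]]}]}
	if err := marshal.WriteQueryPatternsResponseJSON(resp, os.Stdout); err != nil {
		panic(err)
	}
}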
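
Similarly, a short self-contained sketch of the vendored hashicorp/golang-lru/v2 simplelru cache, using only the generic NewLRU constructor and the Add/Get methods shown above; the capacity of 2 and the eviction callback are illustrative choices, not values used anywhere in this diff:

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// onEvict fires for every entry removed to make room (and for each entry on Purge).
	onEvict := func(key string, value int) {
		fmt.Printf("evicted %s=%d\n", key, value)
	}

	// NewLRU returns an error for non-positive sizes.
	cache, err := simplelru.NewLRU[string, int](2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Get("a")    // touch "a" so "b" becomes the least recently used entry
	cache.Add("c", 3) // capacity exceeded: evicts "b"
	if v, ok := cache.Get("a"); ok {
		fmt.Println("a =", v) // a = 1
	}
}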