Add cache hit log lines for instant metric query
Signed-off-by: Kaviraj <[email protected]>
kavirajk committed Feb 20, 2024
1 parent bbe5605 commit 38e71d6
Showing 4 changed files with 27 additions and 7 deletions.
cmd/loki/loki-local-with-memcached.yaml (11 additions, 0 deletions)

@@ -22,6 +22,17 @@ query_range:
   cache_results: true
   cache_volume_results: true
   cache_series_results: true
+  cache_instant_metric_results: true
+  instant_metric_query_split_align: true
+  instant_metric_results_cache:
+    cache:
+      default_validity: 12h
+      memcached_client:
+        consistent_hash: true
+        addresses: "dns+localhost:11211"
+        max_idle_conns: 16
+        timeout: 500ms
+        update_interval: 1m
   series_results_cache:
     cache:
       default_validity: 12h
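Note on the new keys: cache_instant_metric_results toggles result caching for instant metric queries, and instant_metric_results_cache points that cache at the same local memcached instance used by the other caches in this example config. instant_metric_query_split_align appears, from its name, to align the split boundaries of instant metric queries; consult the Loki configuration reference for the authoritative description.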
pkg/logql/metrics.go (12 additions, 5 deletions)

@@ -94,7 +94,8 @@ func RecordRangeAndInstantQueryMetrics(
 ) {
 	var (
 		logger        = fixLogger(ctx, log)
-		rt            = string(GetRangeType(p))
+		rangeType     = GetRangeType(p)
+		rt            = string(rangeType)
 		latencyType   = latencyTypeFast
 		returnedLines = 0
 	)
@@ -103,6 +104,12 @@ func RecordRangeAndInstantQueryMetrics(
 		level.Warn(logger).Log("msg", "error parsing query type", "err", err)
 	}
 
+	resultCache := stats.Caches.Result
+
+	if queryType == QueryTypeMetric && rangeType == InstantType {
+		resultCache = stats.Caches.InstantMetricResult
+	}
+
 	// Tag throughput metric by latency type based on a threshold.
 	// Latency below the threshold is fast, above is slow.
 	if stats.Summary.ExecTime > slowQueryThresholdSecond {
@@ -162,10 +169,10 @@ func RecordRangeAndInstantQueryMetrics(
 		"cache_volume_results_req", stats.Caches.VolumeResult.EntriesRequested,
 		"cache_volume_results_hit", stats.Caches.VolumeResult.EntriesFound,
 		"cache_volume_results_download_time", stats.Caches.VolumeResult.CacheDownloadTime(),
-		"cache_result_req", stats.Caches.Result.EntriesRequested,
-		"cache_result_hit", stats.Caches.Result.EntriesFound,
-		"cache_result_download_time", stats.Caches.Result.CacheDownloadTime(),
-		"cache_result_query_length_served", stats.Caches.Result.CacheQueryLengthServed(),
+		"cache_result_req", resultCache.EntriesRequested,
+		"cache_result_hit", resultCache.EntriesFound,
+		"cache_result_download_time", resultCache.CacheDownloadTime(),
+		"cache_result_query_length_served", resultCache.CacheQueryLengthServed(),
 	}...)
 
 	logValues = append(logValues, tagsToKeyValues(queryTags)...)
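The effect of the metrics.go change: for instant metric queries, the shared cache_result_* keys in the per-query statistics log line are now fed from stats.Caches.InstantMetricResult rather than stats.Caches.Result, so cache hits for this query type are no longer reported as zero. An illustrative logfmt excerpt (values are hypothetical; all other keys omitted):

cache_result_req=4 cache_result_hit=3 cache_result_download_time=1.2ms cache_result_query_length_served=45m0s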
pkg/querier/queryrange/instant_metric_cache.go (3 additions, 1 deletion)

@@ -49,6 +49,8 @@ func (cfg *InstantMetricCacheConfig) Validate() error {
 	return cfg.ResultsCacheConfig.Validate()
 }
 
+type instantMetricExtractor struct{}
+
 func NewInstantMetricCacheMiddleware(
 	log log.Logger,
 	limits Limits,
@@ -67,7 +69,7 @@ func NewInstantMetricCacheMiddleware(
 		InstantMetricSplitter{limits, transformer},
 		limits,
 		merger,
-		queryrangebase.PrometheusResponseExtractor{},
+		PrometheusExtractor{},
 		cacheGenNumberLoader,
 		func(ctx context.Context, r queryrangebase.Request) bool {
 			if shouldCache != nil && !shouldCache(ctx, r) {
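The extractor swap from queryrangebase.PrometheusResponseExtractor{} to the package-local PrometheusExtractor{} presumably routes cache extraction through Loki's own Prometheus-response handling, in line with the other Loki results-cache middlewares; the commit itself does not spell out the motivation.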
pkg/querier/queryrange/split_by_range.go (1 addition, 1 deletion)

@@ -62,7 +62,7 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (
 		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
 	}
 
-	interval := validation.SmallestPositiveNonZeroDurationPerTenant(tenants, s.limits.QuerySplitDuration)
+	interval := validation.SmallestPositiveNonZeroDurationPerTenant(tenants, s.limits.InstantMetricQuerySplitDuration)
 	// if no interval configured, continue to the next middleware
 	if interval == 0 {
 		return s.next.Do(ctx, request)
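This hunk makes the instant-metric split middleware read the instant-metric-specific per-tenant limit (InstantMetricQuerySplitDuration) instead of the general query split limit. For readers unfamiliar with the helper, here is a self-contained sketch of the selection semantic that the name validation.SmallestPositiveNonZeroDurationPerTenant implies (a mimic for illustration, not Loki's actual implementation):

package main

import (
	"fmt"
	"time"
)

// smallestPositiveNonZeroDuration mimics the semantic implied by
// validation.SmallestPositiveNonZeroDurationPerTenant: resolve the limit
// for every tenant of the query and keep the smallest value greater than
// zero. A zero duration means "not configured" and is skipped.
func smallestPositiveNonZeroDuration(tenants []string, limit func(string) time.Duration) time.Duration {
	var smallest time.Duration
	for _, tenant := range tenants {
		if d := limit(tenant); d > 0 && (smallest == 0 || d < smallest) {
			smallest = d
		}
	}
	return smallest
}

func main() {
	// Hypothetical per-tenant instant-metric split durations.
	perTenant := map[string]time.Duration{
		"tenant-a": time.Hour,
		"tenant-b": 30 * time.Minute,
		"tenant-c": 0, // not configured
	}
	lookup := func(tenant string) time.Duration { return perTenant[tenant] }

	fmt.Println(smallestPositiveNonZeroDuration([]string{"tenant-a", "tenant-b", "tenant-c"}, lookup))
	// Prints 30m0s: the most granular configured interval wins; tenant-c is ignored.
}

Under that semantic, a multi-tenant query splits by the most granular interval configured among its tenants, and the surrounding code treats a result of 0 (no tenant configured) as a signal to skip splitting entirely.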
