chore(blooms): Add jsonnet lib for bloom filter components #12429

Open
wants to merge 1 commit into base: main
100 changes: 100 additions & 0 deletions production/ksonnet/loki/bloom-compactor.libsonnet
@@ -0,0 +1,100 @@
{
local k = import 'ksonnet-util/kausal.libsonnet',
local container = k.core.v1.container,
local containerPort = k.core.v1.containerPort,
local pvc = k.core.v1.persistentVolumeClaim,
local service = k.core.v1.service,
local statefulSet = k.apps.v1.statefulSet,
local volume = k.core.v1.volume,
local volumeMount = k.core.v1.volumeMount,

_config+:: {
bloom_compactor+: if !$._config.use_bloom_filters then {} else {
// TODO(salvacorts): Configure autoscaling
replicas: error 'missing replicas',
},

},

local name = 'bloom-compactor',
local volumeName = name + '-data',
local volumeMounts = [volumeMount.new(volumeName, '/data')],

// TODO(owen-d): remove PVCs when we can run the bloom-shipper in memory only
bloom_compactor_data_pvc::
if $._config.use_bloom_filters
then
pvc.new(volumeName)
// set disk size
+ pvc.mixin.spec.resources.withRequests({ storage: $._config.bloom_compactor.pvc_size })
// mount the volume as read-write by a single node
+ pvc.mixin.spec.withAccessModes(['ReadWriteOnce'])
// set persistent volume storage class
+ pvc.mixin.spec.withStorageClassName($._config.bloom_compactor.pvc_class)
else {},

bloom_compactor_args:: $._config.commonArgs {
target: 'bloom-compactor',
},

bloom_compactor_ports:: [
containerPort.new(name='http-metrics', port=$._config.http_listen_port),
containerPort.new(name='grpc', port=9095),
],

bloom_compactor_container:: if !$._config.use_bloom_filters then {} else
container.new(name, $._images.bloom_compactor)
// add default ports
+ container.withPorts($.bloom_compactor_ports)
// add target specific CLI arguments
+ container.withArgsMixin(k.util.mapToFlags($.bloom_compactor_args))
// add global environment variables
+ container.withEnvMixin($._config.commonEnvs)
// mount the data pvc at given mountpoint
+ container.withVolumeMountsMixin(volumeMounts)
// add HTTP readiness probe
+ container.mixin.readinessProbe.httpGet.withPath('/ready')
+ container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port)
+ container.mixin.readinessProbe.withTimeoutSeconds(1)
// TODO(salvacorts): Estimate the right values for resources
// define container resource requests
+ k.util.resourcesRequests('1', '4Gi')
// define container resource limits
+ k.util.resourcesLimits(null, '8Gi'),

bloom_compactor_statefulset: if !$._config.use_bloom_filters then {} else
statefulSet.new(name, $._config.bloom_compactor.replicas, [$.bloom_compactor_container])
+ statefulSet.spec.withVolumeClaimTemplatesMixin($.bloom_compactor_data_pvc)
// + statefulSet.mixin.spec.withVolumeClaimTemplatesMixin(volumeClaimTemplates)
// add clusterIP service
+ statefulSet.mixin.spec.withServiceName(name)
// perform rolling update when statefulset configuration changes
+ statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate')
// TODO(owen-d): enable this once supported (currently alpha)
// https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#rolling-updates
// allow 50% of pods to be unavailable during upgrades
// + statefulSet.mixin.spec.updateStrategy.rollingUpdate.withMaxUnavailable('50%') +

// launch or terminate pods in parallel, *does not* affect upgrades
+ statefulSet.mixin.spec.withPodManagementPolicy('Parallel')
// 10001 is the user/group ID assigned to Loki in the Dockerfile
+ statefulSet.mixin.spec.template.spec.securityContext.withRunAsUser(10001)
+ statefulSet.mixin.spec.template.spec.securityContext.withRunAsGroup(10001)
+ statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001)
// ensure statefulset is updated when loki config changes
+ $.config_hash_mixin
// ensure no other workloads are scheduled
+ k.util.antiAffinity
// mount the loki config.yaml
+ k.util.configVolumeMount('loki', '/etc/loki/config')
// mount the runtime overrides.yaml
+ k.util.configVolumeMount('overrides', '/etc/loki/overrides'),

bloom_compactor_service: if !$._config.use_bloom_filters then {} else
k.util.serviceFor($.bloom_compactor_statefulset, $._config.service_ignored_labels),

bloom_compactor_headless_service: if !$._config.use_bloom_filters then {} else
k.util.serviceFor($.bloom_compactor_statefulset, $._config.service_ignored_labels)
+ service.mixin.metadata.withName(name + '-headless')
+ service.mixin.spec.withClusterIp('None'),
}
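
A minimal sketch of how an environment might configure the compactor once use_bloom_filters is enabled. All values are placeholders; pvc_size and pvc_class are assumptions here, since bloom_compactor_data_pvc reads $._config.bloom_compactor.pvc_size and pvc_class but this file only declares replicas.

{
  _config+:: {
    use_bloom_filters: true,
    bloom_compactor+: {
      replicas: 3,         // required; the library default is error 'missing replicas'
      pvc_size: '100Gi',   // assumed key, consumed by bloom_compactor_data_pvc
      pvc_class: 'fast',   // assumed key, consumed by bloom_compactor_data_pvc
    },
  },
}
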
130 changes: 130 additions & 0 deletions production/ksonnet/loki/bloom-gateway.libsonnet
@@ -0,0 +1,130 @@
{
local k = import 'ksonnet-util/kausal.libsonnet',
local container = k.core.v1.container,
local containerPort = k.core.v1.containerPort,
local pvc = k.core.v1.persistentVolumeClaim,
local service = k.core.v1.service,
local statefulSet = k.apps.v1.statefulSet,
local volume = k.core.v1.volume,
local volumeMount = k.core.v1.volumeMount,

_config+:: {
bloom_gateway+: if !$._config.use_bloom_filters then {} else {
// TODO(salvacorts): Configure autoscaling
replicas: error 'missing replicas',

////** Storage **////
// if true, the host needs to have local SSD disks mounted, otherwise PVCs are used
use_local_ssd: false,
// PVC config
pvc_size: if !self.use_local_ssd then error 'bloom_gateway.pvc_size needs to be defined when using PVC' else '',
pvc_class: if !self.use_local_ssd then error 'bloom_gateway.pvc_class needs to be defined when using PVC' else '',
// local SSD config
hostpaths: if self.use_local_ssd then error 'bloom_gateway.hostpaths needs to be defined when using local SSDs' else [],
node_selector: if self.use_local_ssd then error 'bloom_gateway.node_selector needs to be defined when using local SSDs' else {},
tolerations: if self.use_local_ssd then error 'bloom_gateway.tolerations needs to be defined when using local SSDs' else [],
},
},

local name = 'bloom-gateway',

local paths = std.range(0, std.length($._config.bloom_gateway.hostpaths) - 1),

local volumeNames = [
'%s-data-%d' % [name, x]
for x in paths
],

local volumes =
if $._config.bloom_gateway.use_local_ssd
then [
volume.fromHostPath(volumeNames[x], $._config.bloom_gateway.hostpaths[x])
for x in paths
]
else [],

local volumeMounts = [
volumeMount.new(volumeNames[x], '/data%d' % [x])
for x in paths
],
Contributor comment on lines +46 to +49:

This doesn't mount the PVC when not using local SSDs.

Suggested change (replace):

local volumeMounts = [
volumeMount.new(volumeNames[x], '/data%d' % [x])
for x in paths
],

with:

local volumeMounts =
if $._config.bloom_gateway.use_local_ssd
then [
volumeMount.new(volumeNames[x], '/data%d' % [x])
for x in paths
]
else [volumeMount.new($.bloom_gateway_data_pvc.metadata.name, '/data')],


bloom_gateway_args:: $._config.commonArgs {
target: 'bloom-gateway',
},

bloom_gateway_ports:: [
containerPort.new(name='http-metrics', port=$._config.http_listen_port),
containerPort.new(name='grpc', port=9095),
],

bloom_gateway_data_pvc:: if !$._config.use_bloom_filters || !$._config.bloom_gateway.use_local_ssd then null else
Contributor comment:

Bug: the PVC is null if you aren't using local SSDs.

Suggested change (replace):

bloom_gateway_data_pvc:: if !$._config.use_bloom_filters || !$._config.bloom_gateway.use_local_ssd then null else

with:

bloom_gateway_data_pvc:: if !$._config.use_bloom_filters || $._config.bloom_gateway.use_local_ssd then null else

pvc.new('%s-data' % name)
// set disk size
+ pvc.mixin.spec.resources.withRequests({ storage: $._config.bloom_gateway.pvc_size })
// mount the volume as read-write by a single node
+ pvc.mixin.spec.withAccessModes(['ReadWriteOnce'])
// set persistent volume storage class
+ pvc.mixin.spec.withStorageClassName($._config.bloom_gateway.pvc_class),

bloom_gateway_container:: if !$._config.use_bloom_filters then {} else
container.new(name, $._images.bloom_gateway)
// add default ports
+ container.withPorts($.bloom_gateway_ports)
// add target specific CLI arguments
+ container.withArgsMixin(k.util.mapToFlags($.bloom_gateway_args))
// mount local SSD or PVC
+ container.withVolumeMountsMixin(volumeMounts)
// add global environment variables
+ container.withEnvMixin($._config.commonEnvs)
// add HTTP readiness probe
+ container.mixin.readinessProbe.httpGet.withPath('/ready')
+ container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port)
+ container.mixin.readinessProbe.withTimeoutSeconds(1)
// define container resource requests
+ k.util.resourcesRequests('4', '4Gi')
// define container resource limits
+ k.util.resourcesLimits(null, '8Gi'),

bloom_gateway_statefulset: if !$._config.use_bloom_filters then {} else
statefulSet.new(name, $._config.bloom_gateway.replicas, [$.bloom_gateway_container])
// add clusterIP service
+ statefulSet.mixin.spec.withServiceName(name)
// perform rolling update when statefulset configuration changes
+ statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate')
// launch or terminate pods in parallel, *does not* affect upgrades
+ statefulSet.mixin.spec.withPodManagementPolicy('Parallel')
// 10001 is the user/group ID assigned to Loki in the Dockerfile
+ statefulSet.mixin.spec.template.spec.securityContext.withRunAsUser(10001)
+ statefulSet.mixin.spec.template.spec.securityContext.withRunAsGroup(10001)
+ statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001)
// ensure statefulset is updated when loki config changes
+ $.config_hash_mixin
// ensure no other workloads are scheduled
+ k.util.antiAffinity
// mount the loki config.yaml
+ k.util.configVolumeMount('loki', '/etc/loki/config')
// mount the runtime overrides.yaml
+ k.util.configVolumeMount('overrides', '/etc/loki/overrides')
// configuration specific to SSD/PVC usage
+ (
if $._config.bloom_gateway.use_local_ssd
then
// ensure the pod is scheduled on a node with local SSDs if needed
statefulSet.mixin.spec.template.spec.withNodeSelector($._config.bloom_gateway.node_selector)
// tolerate the local-ssd taint
+ statefulSet.mixin.spec.template.spec.withTolerationsMixin($._config.bloom_gateway.tolerations)
// mount the local SSDs
+ statefulSet.mixin.spec.template.spec.withVolumesMixin(volumes)
else
// create persistent volume claim
statefulSet.mixin.spec.withVolumeClaimTemplates([$.bloom_gateway_data_pvc])
),

bloom_gateway_service: if !$._config.use_bloom_filters then {} else
k.util.serviceFor($.bloom_gateway_statefulset, $._config.service_ignored_labels),

bloom_gateway_headless_service: if !$._config.use_bloom_filters then {} else
k.util.serviceFor($.bloom_gateway_statefulset, $._config.service_ignored_labels)
+ service.mixin.metadata.withName(name + '-headless')
+ service.mixin.spec.withClusterIp('None'),
}
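
The gateway supports two storage modes; below are two illustrative override fragments, one per mode. All values (storage class, hostpaths, selectors, tolerations) are placeholders.

// PVC mode: pvc_size and pvc_class must be provided, since their defaults
// are errors when use_local_ssd is false.
{
  _config+:: {
    bloom_gateway+: {
      replicas: 3,
      use_local_ssd: false,
      pvc_size: '100Gi',   // placeholder
      pvc_class: 'fast',   // placeholder
    },
  },
}

// Local SSD mode: hostpaths, node_selector and tolerations must be provided
// instead; the selector and toleration values below are made up for illustration.
{
  _config+:: {
    bloom_gateway+: {
      replicas: 3,
      use_local_ssd: true,
      hostpaths: ['/mnt/disks/ssd0'],
      node_selector: { disktype: 'local-ssd' },
      tolerations: [{ key: 'local-ssd', operator: 'Exists', effect: 'NoSchedule' }],
    },
  },
}
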
8 changes: 8 additions & 0 deletions production/ksonnet/loki/bloomfilters.libsonnet
@@ -0,0 +1,8 @@
{
_config+:: {
// globally enable/disable bloom gateway and bloom compactor
use_bloom_filters: false,
},
}
+ (import 'bloom-compactor.libsonnet')
+ (import 'bloom-gateway.libsonnet')
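
For context, a sketch of how a cluster's jsonnet might toggle the feature once loki.libsonnet imports this file; the import path and values are illustrative.

local loki = import 'loki/loki.libsonnet';

loki {
  _config+:: {
    // switches on the bloom_compactor and bloom_gateway objects above;
    // their per-component settings (replicas, storage) must also be set.
    use_bloom_filters: true,
  },
}
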
3 changes: 3 additions & 0 deletions production/ksonnet/loki/loki.libsonnet
@@ -23,6 +23,9 @@
// Index Gateway support
(import 'index-gateway.libsonnet') +

// Accelerated search using bloom filters
(import 'bloomfilters.libsonnet') +

// BoltDB and TSDB Shipper support. Anything that modifies the compactor must be imported after this.
(import 'shipper.libsonnet') +
