Skip to content

Commit

Permalink
Merge pull request #20 from Alb0t/ala-breaking-change-move-to-prom-histograms
Browse files Browse the repository at this point in the history

Ala breaking change move to prom histograms
  • Loading branch information
Alb0t authored Oct 6, 2023
2 parents dd5a9cd + 3b3a82d commit e37d516
Show file tree
Hide file tree
Showing 4 changed files with 89 additions and 29 deletions.
13 changes: 7 additions & 6 deletions .goreleaser.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,13 @@ builds:
- env:
- CGO_ENABLED=0
archives:
- replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
- name_template: >-
{{- .ProjectName }}_
{{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end -}}
checksum:
name_template: 'checksums.txt'
snapshot:
Expand Down
81 changes: 69 additions & 12 deletions collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,19 @@ import (
"flag"
"io/ioutil"
"os"
"strconv"
"strings"

"github.com/carlescere/scheduler"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
)

var buildVersion = "3.1.2"
var buildVersion = "4.0.2"
var configFile = flag.String("configFile", "/etc/ttl-aerospike-exporter.yaml", "The yaml config file for the exporter")
var ns_set_to_histograms = make(map[string]map[string]*prometheus.HistogramVec)
var ns_set_to_ttl_unit = make(map[string]map[string]int)

var buildInfo = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Expand Down Expand Up @@ -70,9 +73,9 @@ type monconf struct {
Recordcount int `yaml:"recordCount,omitempty"`
ScanPercent float64 `yaml:"scanPercent,omitempty"`
NumberOfBucketsToExport int `yaml:"numberOfBucketsToExport,omitempty"`
BucketWidth int `yaml:"bucketWidth,omitempty"`
BucketStart int `yaml:"bucketStart,omitempty"`
StaticBucketList []float64 `yaml:"staticBucketList,omitempty"`
BucketWidth string `yaml:"bucketWidth,omitempty"`
BucketStart string `yaml:"bucketStart,omitempty"`
StaticBucketList []string `yaml:"staticBucketList,omitempty"`
ReportCount int `yaml:"reportCount,omitempty"`
ScanTotalTimeout string `yaml:"scanTotalTimeout"`
ScanSocketTimeout string `yaml:"scanSocketTimeout"`
Expand All @@ -95,6 +98,51 @@ func (c *conf) setConf() {
}
}

// parseTimeValues parses a list of duration strings such as "160d", "12h",
// or "30s" into their numeric values plus a single shared time unit.
//
// All entries must use the same one-character suffix ("d", "h", or "s");
// mixing suffixes, an unknown suffix, or a malformed number is a fatal
// configuration error. It returns the numeric values, a descriptive unit
// name ("days"/"hours"/"seconds"), and the number of seconds in one unit
// (used later to scale record TTLs into the configured unit).
func parseTimeValues(arr []string) ([]float64, string, int) {
	if len(arr) == 0 {
		log.Fatal("Empty static bucket list?")
	}

	// Extract the unit from the first string; every other entry must match it.
	// Guard the slice so a stray empty string fails with a message instead of
	// an index-out-of-range panic.
	if len(arr[0]) < 2 {
		log.Fatalf("Time value %q too short; expected a number followed by d/h/s", arr[0])
	}
	unit := arr[0][len(arr[0])-1:]

	// Resolve the unit to its descriptive form first, so an unknown suffix
	// fails fast before any number parsing happens.
	var unitDesc string
	var secondsPerUnit int
	switch unit {
	case "d":
		unitDesc = "days"
		secondsPerUnit = 86400
	case "h":
		unitDesc = "hours"
		secondsPerUnit = 3600
	case "s":
		unitDesc = "seconds"
		secondsPerUnit = 1
	default:
		log.Fatalf("Unknown time suffix %q; supported suffixes are d, h, s", unit)
	}

	// Parse the numeric part of each entry, enforcing the shared suffix.
	values := make([]float64, 0, len(arr))
	for _, s := range arr {
		if len(s) < 2 {
			log.Fatalf("Time value %q too short; expected a number followed by %q", s, unit)
		}
		if !strings.HasSuffix(s, unit) {
			log.Fatal("Only 1 time suffix supported at a time, cannot be mixed.")
		}
		val, err := strconv.ParseFloat(s[:len(s)-1], 64)
		if err != nil {
			log.Fatalf("Cannot parse time value %q: %v", s, err)
		}
		values = append(values, val)
	}

	return values, unitDesc, secondsPerUnit
}

func init() {
config.setConf()
log.SetFormatter(&log.TextFormatter{
Expand All @@ -113,23 +161,29 @@ func init() {
histogramConf := config.Monitor[histogramConfIndex]
namespace := histogramConf.Namespace
set := histogramConf.Set
var buckets []float64
number_of_buckets := histogramConf.NumberOfBucketsToExport
bucket_width := float64(histogramConf.BucketWidth)
bucket_start := float64(histogramConf.BucketStart)

var buckets []float64
var unit_modifier int
var ttl_unit string
// buckets definitions
if len(histogramConf.StaticBucketList) > 0 {
if number_of_buckets != 0 || bucket_width != 0 { // cant check that bucket_start is not 0 because thats a reasonable start value.
if number_of_buckets != 0 || histogramConf.BucketWidth != "" { // cant check that bucket_start is not 0 because thats a reasonable start value.
log.Fatalf("Static list of buckets chosen for %s.%s but bucket count or bucket width defined.", namespace, set)
}
// should be using static buckets if we are still here.
buckets = histogramConf.StaticBucketList

// drop "d", "s", "h"
buckets, ttl_unit, unit_modifier = parseTimeValues(histogramConf.StaticBucketList)
} else {
var start_and_width []float64
start_and_width, ttl_unit, unit_modifier = parseTimeValues([]string{histogramConf.BucketStart, histogramConf.BucketWidth})
bucket_start := start_and_width[0]
bucket_width := start_and_width[1]
buckets = prometheus.LinearBuckets(bucket_start, bucket_width, number_of_buckets)
}

histograms := make(map[string]*prometheus.HistogramVec)
ttl_units := make(map[string]int)

if histogramConf.KByteHistogram["deviceSize"] || histogramConf.KByteHistogram["memorySize"] {
expirationTTLBytesHist := prometheus.NewHistogramVec(
Expand All @@ -138,11 +192,12 @@ func init() {
Name: "kib_hist",
Help: "Histogram of how many bytes fall into each ttl bucket. Memory will be the in-memory data size and does not include PI or SI.",
Buckets: buckets,
ConstLabels: prometheus.Labels{"namespace": namespace, "set": set},
ConstLabels: prometheus.Labels{"namespace": namespace, "set": set, "ttlUnit": ttl_unit},
}, []string{"storage_type"},
)
prometheus.MustRegister(expirationTTLBytesHist)
histograms["bytes"] = expirationTTLBytesHist
ttl_units["modifier"] = unit_modifier
}

if true {
Expand All @@ -152,15 +207,17 @@ func init() {
Name: "counts_hist",
Help: "Histogram of how many records fall into each ttl bucket.",
Buckets: buckets,
ConstLabels: prometheus.Labels{"namespace": namespace, "set": set},
ConstLabels: prometheus.Labels{"namespace": namespace, "set": set, "ttlUnit": ttl_unit},
}, []string{},
)
prometheus.MustRegister(expirationTTLCountsHist)
histograms["counts"] = expirationTTLCountsHist
ttl_units["modifier"] = unit_modifier
}

// Add the HistogramVec to the inner map
ns_set_to_histograms[namespace+":"+set] = histograms
ns_set_to_ttl_unit[namespace+":"+set] = ttl_units

//now we can call something like ns_set_to_histograms[mynamespace_myset].Observe in the future.
}
Expand Down
22 changes: 12 additions & 10 deletions conf.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,31 +20,33 @@ monitor:
policyTotalTimeout: 20m # https://golang.org/pkg/time/#ParseDuration
policySocketTimeout: 20m # https://golang.org/pkg/time/#ParseDuration
RecordsPerSecond: 0 # Limit the records per second returned by Aerospike server to the exporter. 0 means no limit.
staticBucketList: #these are in seconds, so the below sets up 4 buckets. 160d bucket, 170, 181, and 220d.
- 13824000 #160*86400
- 14688000 #170*86400
- 15638400 #181*86400
- 19008000 #220*86400
# width/start/numberOfBucketsToExport can only be used if staticBucketList is undefined, and vice versa.
staticBucketList: #You can append suffix "s", "h", "d". Do not mix suffixes per array.
- 160d
- 170d
- 181d
- 220d
- namespace: someothernamespace
set: null # what setname to scan? use `null` (without ticks) to just report on a namespace level
recordCount: -1 # How many records to stop scanning at? Pass '-recordCount=-1' to only use scanPercent.
scanPercent: 1.0 # what percentage of data to scan? Set this to 0 or -1 to only rely on recordCount
bucketWidth: 864000 #10-day width (seconds)
bucketStart: 120960000 #140 (seconds)
numberOfBucketsToExport: 10 #140+ 10*10=up to 240
# width/start/numberOfBucketsToExport can only be used if staticBucketList is undefined, and vice versa.
bucketWidth: 10d #You can append suffix "s", "h", "d". Do not use a different suffix than bucketStart, or it will throw an exception.
bucketStart: 180d #You can append suffix "s", "h", "d". Do not use a different suffix than width, or it will throw an exception.
numberOfBucketsToExport: 10 # your last bucket will be: bucketStart + (bucketWidth * numberOfBucketsToExport)
reportCount: 300000 # if running verbose, How many records should be report on? Every <x> records will cause an entry in the stdout (default 300000)
scanTotalTimeout: 20m # this and other timeout fields are parsed using golang time.parseduration so values like (20m1s, 2h, 1s, 200ms) can be used
scanSocketTimeout: 20m # https://golang.org/pkg/time/#ParseDuration
policyTotalTimeout: 20m # https://golang.org/pkg/time/#ParseDuration
policySocketTimeout: 20m # https://golang.org/pkg/time/#ParseDuration
RecordsPerSecond: 100 # not sure if this works on older versions, but it does right now as of v5. 0 means no limit.
RecordsPerSecond: 100 # Utilizes the scan policy to limit RPS.
## KiB exports
# note on 'resolution here: this can have drastic perf implications
# This directly affects how many times we call histogram.Observe.
# ex. If you have a ~128,000 byte size record and resolution is set to 0.001 we will call observe 128,000 times.
# this does mean we lose some resolution on how large the records are, because they'll be rounded down by 'n' bytes.
# value is in KiB, recommend starting at something like 0.334 so our resolution will be around 334 bytes
kbyteHistogramResolution: 0.334
kbyteHistogramResolution: 0.334
kbyteHistogram:
deviceSize: true
memorySize: false
2 changes: 1 addition & 1 deletion stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,7 @@ func updateStats(namespace string, set string, namespaceSet string, element monc
// too noisy disabled logging on this
} else {
total++
expireTime := rec.Record.Expiration
expireTime := rec.Record.Expiration / uint32(ns_set_to_ttl_unit[namespaceSet]["modifier"])
ns_set_to_histograms[namespaceSet]["counts"].WithLabelValues().Observe(float64(expireTime))

// handle byte histogram
Expand Down

0 comments on commit e37d516

Please sign in to comment.