Skip to content

Commit

Permalink
test(models): Cleanup tests, unexport stuff and unify naming in tests (
Browse files Browse the repository at this point in the history
…#16116)

(cherry picked from commit ddec937)
  • Loading branch information
srebhan committed Nov 18, 2024
1 parent 58f12d5 commit e752c96
Show file tree
Hide file tree
Showing 12 changed files with 835 additions and 788 deletions.
4 changes: 4 additions & 0 deletions models/buffer.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,11 @@ type Buffer interface {
// as unsent.
Reject([]telegraf.Metric)

	// Stats returns the buffer statistics such as rejected, dropped and accepted metrics
Stats() BufferStats

// Close finalizes the buffer and closes all open resources
Close() error
}

// BufferStats holds common metrics used for buffer implementations.
Expand Down
4 changes: 4 additions & 0 deletions models/buffer_disk.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,10 @@ func (b *DiskBuffer) Stats() BufferStats {
return b.BufferStats
}

// Close finalizes the disk buffer by closing the underlying WAL file,
// releasing its file handle. After Close the buffer must not be used.
func (b *DiskBuffer) Close() error {
	return b.file.Close()
}

func (b *DiskBuffer) resetBatch() {
b.batchFirst = 0
b.batchSize = 0
Expand Down
84 changes: 34 additions & 50 deletions models/buffer_disk_test.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package models

import (
"os"
"path/filepath"
"testing"
"time"
Expand All @@ -14,66 +13,37 @@ import (
"github.com/influxdata/telegraf/testutil"
)

func newTestDiskBuffer(t testing.TB) Buffer {
path, err := os.MkdirTemp("", "*-buffer-test")
require.NoError(t, err)
return newTestDiskBufferWithPath(t, "test", path)
}
func TestDiskBufferRetainsTrackingInformation(t *testing.T) {
m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0))

var delivered int
mm, _ := metric.WithTracking(m, func(telegraf.DeliveryInfo) { delivered++ })

func newTestDiskBufferWithPath(t testing.TB, name string, path string) Buffer {
t.Helper()
buf, err := NewBuffer(name, "123", "", 0, "disk", path)
buf, err := NewBuffer("test", "123", "", 0, "disk", t.TempDir())
require.NoError(t, err)
buf.Stats().MetricsAdded.Set(0)
buf.Stats().MetricsWritten.Set(0)
buf.Stats().MetricsDropped.Set(0)
return buf
}
defer buf.Close()

func TestBuffer_RetainsTrackingInformation(t *testing.T) {
var delivered int
mm, _ := metric.WithTracking(Metric(), func(_ telegraf.DeliveryInfo) {
delivered++
})
b := newTestDiskBuffer(t)
b.Add(mm)
batch := b.Batch(1)
b.Accept(batch)
buf.Add(mm)

batch := buf.Batch(1)
buf.Accept(batch)
require.Equal(t, 1, delivered)
}

func TestBuffer_TrackingDroppedFromOldWal(t *testing.T) {
path, err := os.MkdirTemp("", "*-buffer-test")
require.NoError(t, err)
path = filepath.Join(path, "123")
walfile, err := wal.Open(path, nil)
require.NoError(t, err)

tm, _ := metric.WithTracking(Metric(), func(_ telegraf.DeliveryInfo) {})
func TestDiskBufferTrackingDroppedFromOldWal(t *testing.T) {
m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0))

tm, _ := metric.WithTracking(m, func(telegraf.DeliveryInfo) {})
metrics := []telegraf.Metric{
// Basic metric with 1 field, 0 timestamp
Metric(),
metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)),
// Basic metric with 1 field, different timestamp
metric.New(
"cpu",
map[string]string{},
map[string]interface{}{
"value": 20.0,
},
time.Now(),
),
metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 20.0}, time.Now()),
// Metric with a field
metric.New(
"cpu",
map[string]string{
"x": "y",
},
map[string]interface{}{
"value": 18.0,
},
time.Now(),
),
metric.New("cpu", map[string]string{"x": "y"}, map[string]interface{}{"value": 18.0}, time.Now()),
// Tracking metric
tm,
// Metric with lots of tag types
Expand All @@ -95,15 +65,29 @@ func TestBuffer_TrackingDroppedFromOldWal(t *testing.T) {
// call manually so that we can properly use metric.ToBytes() without having initialized a buffer
registerGob()

// Prefill the WAL file
path := t.TempDir()
walfile, err := wal.Open(filepath.Join(path, "123"), nil)
require.NoError(t, err)
defer walfile.Close()
for i, m := range metrics {
data, err := metric.ToBytes(m)
require.NoError(t, err)
require.NoError(t, walfile.Write(uint64(i+1), data))
}
walfile.Close()

// Create a buffer
buf, err := NewBuffer("123", "123", "", 0, "disk", path)
require.NoError(t, err)
buf.Stats().MetricsAdded.Set(0)
buf.Stats().MetricsWritten.Set(0)
buf.Stats().MetricsDropped.Set(0)
defer buf.Close()

batch := buf.Batch(4)

b := newTestDiskBufferWithPath(t, filepath.Base(path), filepath.Dir(path))
batch := b.Batch(4)
// expected skips the tracking metric
// Check that the tracking metric is skipped
expected := []telegraf.Metric{
metrics[0], metrics[1], metrics[2], metrics[4],
}
Expand Down
64 changes: 34 additions & 30 deletions models/buffer_mem.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,36 +36,6 @@ func (b *MemoryBuffer) Len() int {
return b.length()
}

func (b *MemoryBuffer) length() int {
return min(b.size+b.batchSize, b.cap)
}

func (b *MemoryBuffer) addMetric(m telegraf.Metric) int {
dropped := 0
// Check if Buffer is full
if b.size == b.cap {
b.metricDropped(b.buf[b.last])
dropped++

if b.batchSize > 0 {
b.batchSize--
b.batchFirst = b.next(b.batchFirst)
}
}

b.metricAdded()

b.buf[b.last] = m
b.last = b.next(b.last)

if b.size == b.cap {
b.first = b.next(b.first)
}

b.size = min(b.size+1, b.cap)
return dropped
}

func (b *MemoryBuffer) Add(metrics ...telegraf.Metric) int {
b.Lock()
defer b.Unlock()
Expand Down Expand Up @@ -149,10 +119,44 @@ func (b *MemoryBuffer) Reject(batch []telegraf.Metric) {
b.BufferSize.Set(int64(b.length()))
}

// Close implements the Buffer interface. The in-memory buffer holds no
// external resources (files, sockets, …), so there is nothing to release
// and the call always succeeds.
func (b *MemoryBuffer) Close() error {
	return nil
}

// Stats returns the statistics counters shared by this buffer instance.
func (b *MemoryBuffer) Stats() BufferStats {
	return b.BufferStats
}

// length reports the number of metrics currently held by the buffer,
// counting both the unbatched portion and the in-flight batch, and never
// exceeding the buffer capacity.
func (b *MemoryBuffer) length() int {
	if total := b.size + b.batchSize; total < b.cap {
		return total
	}
	return b.cap
}

// addMetric inserts m into the ring buffer. When the buffer is full the
// entry about to be overwritten is accounted as dropped first. It returns
// the number of metrics dropped by this insertion (0 or 1).
// NOTE(review): appears to expect the buffer lock to be held by the caller
// (Add locks before delegating) — confirm for other call sites.
func (b *MemoryBuffer) addMetric(m telegraf.Metric) int {
	dropped := 0
	// Check if Buffer is full
	if b.size == b.cap {
		// b.buf[b.last] is the slot the new metric will overwrite below,
		// so report its current occupant as dropped before losing it.
		b.metricDropped(b.buf[b.last])
		dropped++

		// If the overwritten slot belonged to the current batch, shrink the
		// batch window so it no longer references the lost entry.
		if b.batchSize > 0 {
			b.batchSize--
			b.batchFirst = b.next(b.batchFirst)
		}
	}

	b.metricAdded()

	b.buf[b.last] = m
	b.last = b.next(b.last)

	// When full, advance the head as well so first keeps pointing at the
	// oldest retained metric.
	if b.size == b.cap {
		b.first = b.next(b.first)
	}

	b.size = min(b.size+1, b.cap)
	return dropped
}

// next returns the next index with wrapping.
func (b *MemoryBuffer) next(index int) int {
index++
Expand Down
34 changes: 19 additions & 15 deletions models/buffer_mem_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,38 +2,42 @@ package models

import (
"testing"
"time"

"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/require"
)

func newTestMemoryBuffer(t testing.TB, capacity int) Buffer {
t.Helper()
buf, err := NewBuffer("test", "123", "", capacity, "memory", "")
func TestMemoryBufferAcceptCallsMetricAccept(t *testing.T) {
buf, err := NewBuffer("test", "123", "", 5, "memory", "")
require.NoError(t, err)
buf.Stats().MetricsAdded.Set(0)
buf.Stats().MetricsWritten.Set(0)
buf.Stats().MetricsDropped.Set(0)
return buf
}
defer buf.Close()

func TestBuffer_AcceptCallsMetricAccept(t *testing.T) {
var accept int
mm := &MockMetric{
Metric: Metric(),
mm := &mockMetric{
Metric: metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0)),
AcceptF: func() {
accept++
},
}
b := newTestMemoryBuffer(t, 5)
b.Add(mm, mm, mm)
batch := b.Batch(2)
b.Accept(batch)
buf.Add(mm, mm, mm)
batch := buf.Batch(2)
buf.Accept(batch)
require.Equal(t, 2, accept)
}

func BenchmarkAddMetrics(b *testing.B) {
buf := newTestMemoryBuffer(b, 10000)
m := Metric()
func BenchmarkMemoryBufferAddMetrics(b *testing.B) {
buf, err := NewBuffer("test", "123", "", 10000, "memory", "")
require.NoError(b, err)
buf.Stats().MetricsAdded.Set(0)
buf.Stats().MetricsWritten.Set(0)
buf.Stats().MetricsDropped.Set(0)
defer buf.Close()

m := metric.New("cpu", map[string]string{}, map[string]interface{}{"value": 42.0}, time.Unix(0, 0))
for n := 0; n < b.N; n++ {
buf.Add(m)
}
Expand Down
Loading

0 comments on commit e752c96

Please sign in to comment.