[exporter/debug] format metric data points as one-liners in normal verbosity (open-telemetry#10462)

#### Description

This pull request is part of
open-telemetry#7806;
it implements the change for metrics. The changes for
[logs](open-telemetry#10225)
and
[traces](open-telemetry#10280)
have been proposed in separate pull requests.

This change applies to the Debug exporter only; the behavior of the
deprecated Logging exporter remains unchanged. To get the new output
format, switch from the Logging exporter to the Debug exporter.
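
For illustration only (not part of this PR's diff), here is a minimal sketch of how the new normal-verbosity marshaler renders a metric data point; it mirrors the "sum data point" unit test added below. Note that `internal/normal` is an internal package, so this snippet only compiles from within the collector module.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/exporter/debugexporter/internal/normal"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// Build one sum metric with a single data point, as in the unit tests.
	metrics := pmetric.NewMetrics()
	metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	metric.SetName("system.cpu.time")
	dataPoint := metric.SetEmptySum().DataPoints().AppendEmpty()
	dataPoint.SetDoubleValue(123.456)
	dataPoint.Attributes().PutStr("state", "user")
	dataPoint.Attributes().PutStr("cpu", "0")

	// The normal-verbosity marshaler renders each data point as a single line.
	out, err := normal.NewNormalMetricsMarshaler().MarshalMetrics(metrics)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Prints: system.cpu.time{state=user,cpu=0} 123.456
}
```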

#### Link to tracking issue

- open-telemetry#7806

#### Testing

Added unit tests for the formatter.

#### Documentation

Described the formatting in the Debug exporter's README.
andrzej-stencel authored Jun 24, 2024
1 parent 3364ba1 commit 1d1ff4a
Showing 6 changed files with 280 additions and 36 deletions.
25 changes: 0 additions & 25 deletions .chloggen/debug-exporter-normal-verbosity-traces.yaml

This file was deleted.

@@ -7,7 +7,7 @@ change_type: enhancement
component: exporter/debug

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
-note: In `normal` verbosity, display one line of text for each log record
+note: In `normal` verbosity, display one line of text for each telemetry record (log, data point, span)

# One or more tracking issues or pull requests related to the change
issues: [7806]
13 changes: 5 additions & 8 deletions exporter/debugexporter/README.md
@@ -67,17 +67,12 @@ With `verbosity: normal`, the exporter outputs about one line for each telemetry
The "one line per telemetry record" is not a strict rule.
For example, logs with a multiline body will be output as multiple lines.

-> [!IMPORTANT]
-> Currently the `normal` verbosity is only implemented for logs and traces.
-> Metrics are going to be implemented in the future.
-> The current behavior for metrics is the same as in `basic` verbosity.

Here's an example output:

```console
-2024-05-31T13:26:37.531+0200 info TracesExporter {"kind": "exporter", "data_type": "traces", "name": "debug", "resource spans": 1, "spans": 2}
-2024-05-31T13:26:37.531+0200 info okey-dokey-0 082bc2f70f519e32a39fd26ae69b43c0 51201084f4d65159
-lets-go 082bc2f70f519e32a39fd26ae69b43c0 cd321682f3514378
+2024-06-24T15:18:58.559+0200 info TracesExporter {"kind": "exporter", "data_type": "traces", "name": "debug", "resource spans": 1, "spans": 2}
+2024-06-24T15:18:58.559+0200 info okey-dokey-0 4bdc558f0f0650e3ccaac8f3ae133954 8b69459f015c164b net.peer.ip=1.2.3.4 peer.service=telemetrygen-client
+lets-go 4bdc558f0f0650e3ccaac8f3ae133954 8820ee5366817639 net.peer.ip=1.2.3.4 peer.service=telemetrygen-server
{"kind": "exporter", "data_type": "traces", "name": "debug"}
```
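
With this change, metric data points at `normal` verbosity are likewise rendered one per line, in the form `<metric name>{<attribute>=<value>,...} <value>`. As a sketch, the data point lines below are taken verbatim from the unit tests added in this commit (the exporter's summary log line is omitted):

```console
system.cpu.time{state=user,cpu=0} 123.456
http.server.request.duration{http.response.status_code=200,http.request.method=GET} count=1340 sum=99.573 min=0.017 max=8.13 le0.125=1324 le0.5=13 le1=0 le3=2 1
```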

@@ -128,3 +123,5 @@ Attributes:
## Warnings

- Unstable Output Format: The output formats for all verbosity levels are not guaranteed and may be changed at any time without a breaking change.

+[telemetrygen]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/cmd/telemetrygen
7 changes: 5 additions & 2 deletions exporter/debugexporter/exporter.go
@@ -30,19 +30,22 @@ type debugExporter struct {

func newDebugExporter(logger *zap.Logger, verbosity configtelemetry.Level) *debugExporter {
var logsMarshaler plog.Marshaler
+var metricsMarshaler pmetric.Marshaler
var tracesMarshaler ptrace.Marshaler
if verbosity == configtelemetry.LevelDetailed {
logsMarshaler = otlptext.NewTextLogsMarshaler()
+metricsMarshaler = otlptext.NewTextMetricsMarshaler()
tracesMarshaler = otlptext.NewTextTracesMarshaler()
} else {
logsMarshaler = normal.NewNormalLogsMarshaler()
+metricsMarshaler = normal.NewNormalMetricsMarshaler()
tracesMarshaler = normal.NewNormalTracesMarshaler()
}
return &debugExporter{
verbosity: verbosity,
logger: logger,
logsMarshaler: logsMarshaler,
-metricsMarshaler: otlptext.NewTextMetricsMarshaler(),
+metricsMarshaler: metricsMarshaler,
tracesMarshaler: tracesMarshaler,
}
}
@@ -68,7 +71,7 @@ func (s *debugExporter) pushMetrics(_ context.Context, md pmetric.Metrics) error
zap.Int("resource metrics", md.ResourceMetrics().Len()),
zap.Int("metrics", md.MetricCount()),
zap.Int("data points", md.DataPointCount()))
-if s.verbosity != configtelemetry.LevelDetailed {
+if s.verbosity == configtelemetry.LevelBasic {
return nil
}

149 changes: 149 additions & 0 deletions exporter/debugexporter/internal/normal/metrics.go
@@ -0,0 +1,149 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package normal // import "go.opentelemetry.io/collector/exporter/debugexporter/internal/normal"

import (
"bytes"
"fmt"
"strings"

"go.opentelemetry.io/collector/pdata/pmetric"
)

type normalMetricsMarshaler struct{}

// Ensure normalMetricsMarshaler implements the pmetric.Marshaler interface
var _ pmetric.Marshaler = normalMetricsMarshaler{}

// NewNormalMetricsMarshaler returns a pmetric.Marshaler for normal verbosity. It writes one line of text per metric data point.
func NewNormalMetricsMarshaler() pmetric.Marshaler {
return normalMetricsMarshaler{}
}

func (normalMetricsMarshaler) MarshalMetrics(md pmetric.Metrics) ([]byte, error) {
var buffer bytes.Buffer
for i := 0; i < md.ResourceMetrics().Len(); i++ {
resourceMetrics := md.ResourceMetrics().At(i)
for j := 0; j < resourceMetrics.ScopeMetrics().Len(); j++ {
scopeMetrics := resourceMetrics.ScopeMetrics().At(j)
for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
metric := scopeMetrics.Metrics().At(k)

var dataPointLines []string
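// Each metric type renders its data points differently; metrics of an unknown or empty type produce no output lines.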
switch metric.Type() {
case pmetric.MetricTypeGauge:
dataPointLines = writeNumberDataPoints(metric, metric.Gauge().DataPoints())
case pmetric.MetricTypeSum:
dataPointLines = writeNumberDataPoints(metric, metric.Sum().DataPoints())
case pmetric.MetricTypeHistogram:
dataPointLines = writeHistogramDataPoints(metric)
case pmetric.MetricTypeExponentialHistogram:
dataPointLines = writeExponentialHistogramDataPoints(metric)
case pmetric.MetricTypeSummary:
dataPointLines = writeSummaryDataPoints(metric)
}
for _, line := range dataPointLines {
buffer.WriteString(line)
}
}
}
}
return buffer.Bytes(), nil
}

func writeNumberDataPoints(metric pmetric.Metric, dataPoints pmetric.NumberDataPointSlice) (lines []string) {
for i := 0; i < dataPoints.Len(); i++ {
dataPoint := dataPoints.At(i)
dataPointAttributes := writeAttributes(dataPoint.Attributes())

var value string
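// A number data point carries either an int or a double value; format whichever is set.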
switch dataPoint.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
value = fmt.Sprintf("%v", dataPoint.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
value = fmt.Sprintf("%v", dataPoint.DoubleValue())
}

dataPointLine := fmt.Sprintf("%s{%s} %s\n", metric.Name(), strings.Join(dataPointAttributes, ","), value)
lines = append(lines, dataPointLine)
}
return lines
}

func writeHistogramDataPoints(metric pmetric.Metric) (lines []string) {
for i := 0; i < metric.Histogram().DataPoints().Len(); i++ {
dataPoint := metric.Histogram().DataPoints().At(i)
dataPointAttributes := writeAttributes(dataPoint.Attributes())

value := fmt.Sprintf("count=%d", dataPoint.Count())
if dataPoint.HasSum() {
value += fmt.Sprintf(" sum=%v", dataPoint.Sum())
}
if dataPoint.HasMin() {
value += fmt.Sprintf(" min=%v", dataPoint.Min())
}
if dataPoint.HasMax() {
value += fmt.Sprintf(" max=%v", dataPoint.Max())
}
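// Append the bucket counts as le<bound>=<count>; the final bucket is the overflow bucket and has no explicit bound, so only its count is printed.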

for bucketIndex := 0; bucketIndex < dataPoint.BucketCounts().Len(); bucketIndex++ {
bucketBound := ""
if bucketIndex < dataPoint.ExplicitBounds().Len() {
bucketBound = fmt.Sprintf("le%v=", dataPoint.ExplicitBounds().At(bucketIndex))
}
bucketCount := dataPoint.BucketCounts().At(bucketIndex)
value += fmt.Sprintf(" %s%d", bucketBound, bucketCount)
}

dataPointLine := fmt.Sprintf("%s{%s} %s\n", metric.Name(), strings.Join(dataPointAttributes, ","), value)
lines = append(lines, dataPointLine)
}
return lines
}

func writeExponentialHistogramDataPoints(metric pmetric.Metric) (lines []string) {
for i := 0; i < metric.ExponentialHistogram().DataPoints().Len(); i++ {
dataPoint := metric.ExponentialHistogram().DataPoints().At(i)
dataPointAttributes := writeAttributes(dataPoint.Attributes())

value := fmt.Sprintf("count=%d", dataPoint.Count())
if dataPoint.HasSum() {
value += fmt.Sprintf(" sum=%v", dataPoint.Sum())
}
if dataPoint.HasMin() {
value += fmt.Sprintf(" min=%v", dataPoint.Min())
}
if dataPoint.HasMax() {
value += fmt.Sprintf(" max=%v", dataPoint.Max())
}

// TODO display buckets

dataPointLine := fmt.Sprintf("%s{%s} %s\n", metric.Name(), strings.Join(dataPointAttributes, ","), value)
lines = append(lines, dataPointLine)
}
return lines
}

func writeSummaryDataPoints(metric pmetric.Metric) (lines []string) {
for i := 0; i < metric.Summary().DataPoints().Len(); i++ {
dataPoint := metric.Summary().DataPoints().At(i)
dataPointAttributes := writeAttributes(dataPoint.Attributes())

value := fmt.Sprintf("count=%d", dataPoint.Count())
value += fmt.Sprintf(" sum=%f", dataPoint.Sum())
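// Append each quantile as q<quantile>=<value>.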

for quantileIndex := 0; quantileIndex < dataPoint.QuantileValues().Len(); quantileIndex++ {
quantile := dataPoint.QuantileValues().At(quantileIndex)
value += fmt.Sprintf(" q%v=%v", quantile.Quantile(), quantile.Value())
}

dataPointLine := fmt.Sprintf("%s{%s} %s\n", metric.Name(), strings.Join(dataPointAttributes, ","), value)
lines = append(lines, dataPointLine)
}
return lines
}
120 changes: 120 additions & 0 deletions exporter/debugexporter/internal/normal/metrics_test.go
@@ -0,0 +1,120 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package normal

import (
"testing"

"github.com/stretchr/testify/assert"

"go.opentelemetry.io/collector/pdata/pmetric"
)

func TestMarshalMetrics(t *testing.T) {
tests := []struct {
name string
input pmetric.Metrics
expected string
}{
{
name: "empty metrics",
input: pmetric.NewMetrics(),
expected: "",
},
{
name: "sum data point",
input: func() pmetric.Metrics {
metrics := pmetric.NewMetrics()
metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
metric.SetName("system.cpu.time")
dataPoint := metric.SetEmptySum().DataPoints().AppendEmpty()
dataPoint.SetDoubleValue(123.456)
dataPoint.Attributes().PutStr("state", "user")
dataPoint.Attributes().PutStr("cpu", "0")
return metrics
}(),
expected: `system.cpu.time{state=user,cpu=0} 123.456
`,
},
{
name: "gauge data point",
input: func() pmetric.Metrics {
metrics := pmetric.NewMetrics()
metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
metric.SetName("system.cpu.utilization")
dataPoint := metric.SetEmptyGauge().DataPoints().AppendEmpty()
dataPoint.SetDoubleValue(78.901234567)
dataPoint.Attributes().PutStr("state", "free")
dataPoint.Attributes().PutStr("cpu", "8")
return metrics
}(),
expected: `system.cpu.utilization{state=free,cpu=8} 78.901234567
`,
},
{
name: "histogram",
input: func() pmetric.Metrics {
metrics := pmetric.NewMetrics()
metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
metric.SetName("http.server.request.duration")
dataPoint := metric.SetEmptyHistogram().DataPoints().AppendEmpty()
dataPoint.Attributes().PutInt("http.response.status_code", 200)
dataPoint.Attributes().PutStr("http.request.method", "GET")
dataPoint.ExplicitBounds().FromRaw([]float64{0.125, 0.5, 1, 3})
dataPoint.BucketCounts().FromRaw([]uint64{1324, 13, 0, 2, 1})
dataPoint.SetCount(1340)
dataPoint.SetSum(99.573)
dataPoint.SetMin(0.017)
dataPoint.SetMax(8.13)
return metrics
}(),
expected: `http.server.request.duration{http.response.status_code=200,http.request.method=GET} count=1340 sum=99.573 min=0.017 max=8.13 le0.125=1324 le0.5=13 le1=0 le3=2 1
`,
},
{
name: "exponential histogram",
input: func() pmetric.Metrics {
metrics := pmetric.NewMetrics()
metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
metric.SetName("http.server.request.duration")
dataPoint := metric.SetEmptyExponentialHistogram().DataPoints().AppendEmpty()
dataPoint.Attributes().PutInt("http.response.status_code", 200)
dataPoint.Attributes().PutStr("http.request.method", "GET")
dataPoint.SetCount(1340)
dataPoint.SetSum(99.573)
dataPoint.SetMin(0.017)
dataPoint.SetMax(8.13)
return metrics
}(),
expected: `http.server.request.duration{http.response.status_code=200,http.request.method=GET} count=1340 sum=99.573 min=0.017 max=8.13
`,
},
{
name: "summary",
input: func() pmetric.Metrics {
metrics := pmetric.NewMetrics()
metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
metric.SetName("summary")
dataPoint := metric.SetEmptySummary().DataPoints().AppendEmpty()
dataPoint.Attributes().PutInt("http.response.status_code", 200)
dataPoint.Attributes().PutStr("http.request.method", "GET")
dataPoint.SetCount(1340)
dataPoint.SetSum(99.573)
quantile := dataPoint.QuantileValues().AppendEmpty()
quantile.SetQuantile(0.01)
quantile.SetValue(15)
return metrics
}(),
expected: `summary{http.response.status_code=200,http.request.method=GET} count=1340 sum=99.573000 q0.01=15
`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
output, err := NewNormalMetricsMarshaler().MarshalMetrics(tt.input)
assert.NoError(t, err)
assert.Equal(t, tt.expected, string(output))
})
}
}
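
To run the new tests locally, something like the following should work, assuming the usual repository layout in which `exporter/debugexporter` is its own Go module:

```console
cd exporter/debugexporter
go test ./internal/normal/...
```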
