Implement running jobs metric + update metrics code
nikita-tkachenko-datadog committed Apr 1, 2024
1 parent 3e74749 commit a03b079
Showing 23 changed files with 303 additions and 352 deletions.
@@ -28,7 +28,7 @@ of this software and associated documentation files (the "Software"), to deal
import com.timgroup.statsd.ServiceCheck;
import java.util.Map;
import java.util.Set;
import org.datadog.jenkins.plugins.datadog.clients.Metrics;
import org.datadog.jenkins.plugins.datadog.metrics.MetricsClient;
import org.datadog.jenkins.plugins.datadog.traces.write.TraceWriteStrategy;

public interface DatadogClient {
@@ -69,23 +69,7 @@ public ServiceCheck.Status toServiceCheckStatus(){
*/
boolean event(DatadogEvent event);

/**
* Increment a counter for the given metrics.
* NOTE: To submit all counters you need to execute the flushCounters method.
* This is to aggregate counters and submit them in batch to Datadog in order to minimize network traffic.
* @param name - metric name
* @param hostname - metric hostname
* @param tags - metric tags
* @return a boolean to signify the success or failure of increment submission.
*/
boolean incrementCounter(String name, String hostname, Map<String, Set<String>> tags);

/**
* Submit all your counters as rate with 10 seconds intervals.
*/
void flushCounters();

Metrics metrics();
MetricsClient metrics();

/**
* Sends a service check to the Datadog API, including the check name, and status.
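For readers skimming the diff, the shape of the new abstraction that replaces incrementCounter/flushCounters on the DatadogClient interface can be summarized as follows. This is a sketch reconstructed from the usages visible in this commit (gauge, rate, and an overridden close()); extending AutoCloseable is an assumption, and the actual org.datadog.jenkins.plugins.datadog.metrics.MetricsClient interface may carry additional methods and javadoc.

package org.datadog.jenkins.plugins.datadog.metrics;

import java.util.Map;
import java.util.Set;

// Sketch only: reconstructed from the implementations shown in this diff.
public interface MetricsClient extends AutoCloseable {
    // Submit an instantaneous gauge value.
    void gauge(String name, double value, String hostname, Map<String, Set<String>> tags);

    // Submit a value to be reported as a rate over the client's flush interval.
    void rate(String name, double value, String hostname, Map<String, Set<String>> tags);
}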

This file was deleted.

@@ -49,6 +49,7 @@ of this software and associated documentation files (the "Software"), to deal
import org.datadog.jenkins.plugins.datadog.DatadogEvent;
import org.datadog.jenkins.plugins.datadog.DatadogGlobalConfiguration;
import org.datadog.jenkins.plugins.datadog.DatadogUtilities;
import org.datadog.jenkins.plugins.datadog.metrics.MetricsClient;
import org.datadog.jenkins.plugins.datadog.traces.mapper.JsonTraceSpanMapper;
import org.datadog.jenkins.plugins.datadog.traces.write.AgentTraceWriteStrategy;
import org.datadog.jenkins.plugins.datadog.traces.write.Payload;
@@ -399,35 +400,13 @@ public boolean event(DatadogEvent event) {
}

@Override
public boolean incrementCounter(String name, String hostname, Map<String, Set<String>> tags) {
try {
boolean status = reinitializeStatsDClient(false);
if(!status){
return false;
}
logger.fine("increment counter with dogStatD client");
this.statsd.incrementCounter(name, TagsUtil.convertTagsToArray(tags));
return true;
} catch(Exception e){
DatadogUtilities.severe(logger, e, "Failed to increment counter with DogStatsD");
reinitializeStatsDClient(true);
return false;
}
}

@Override
public void flushCounters() {
return; //noop
}

@Override
public Metrics metrics() {
public MetricsClient metrics() {
return new AgentMetrics();
}

private final class AgentMetrics implements Metrics {
private final class AgentMetrics implements MetricsClient {
@Override
public void gauge(String name, long value, String hostname, Map<String, Set<String>> tags) {
public void gauge(String name, double value, String hostname, Map<String, Set<String>> tags) {
try {
boolean status = reinitializeStatsDClient(false);
if (!status) {
@@ -441,6 +420,21 @@ public void gauge(String name, long value, String hostname, Map<String, Set<String>> tags) {
}
}

@Override
public void rate(String name, double value, String hostname, Map<String, Set<String>> tags) {
try {
boolean status = reinitializeStatsDClient(false);
if(!status){
return;
}
logger.fine("increment counter with dogStatD client");
statsd.count(name, value, TagsUtil.convertTagsToArray(tags));
} catch(Exception e){
DatadogUtilities.severe(logger, e, "Failed to increment counter with DogStatsD");
reinitializeStatsDClient(true);
}
}

@Override
public void close() throws Exception {
// no op
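As a usage illustration (not code from this commit): with incrementCounter and flushCounters removed, a caller now obtains a MetricsClient from the DatadogClient and submits gauges or rates directly, typically in a try-with-resources block so the client is closed when the batch is done. The class and metric name below are hypothetical; only client.metrics(), gauge() and rate() come from this diff.

import java.util.Map;
import java.util.Set;

// Hypothetical caller sketch; error handling is simplified.
public class RunningJobsReporter {
    public void reportRunningJobs(DatadogClient client, String hostname,
                                  Map<String, Set<String>> tags, double runningJobs) {
        try (MetricsClient metrics = client.metrics()) {
            // Illustrative metric name; the name actually used by the plugin may differ.
            metrics.gauge("jenkins.job.currently_running", runningJobs, hostname, tags);
        } catch (Exception e) {
            // close() is declared to throw; real plugin code logs via DatadogUtilities.severe(...)
        }
    }
}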
@@ -34,7 +34,6 @@ of this software and associated documentation files (the "Software"), to deal
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import net.sf.json.JSON;
@@ -46,6 +45,7 @@ of this software and associated documentation files (the "Software"), to deal
import org.datadog.jenkins.plugins.datadog.DatadogEvent;
import org.datadog.jenkins.plugins.datadog.DatadogGlobalConfiguration;
import org.datadog.jenkins.plugins.datadog.DatadogUtilities;
import org.datadog.jenkins.plugins.datadog.metrics.MetricsClient;
import org.datadog.jenkins.plugins.datadog.traces.write.Payload;
import org.datadog.jenkins.plugins.datadog.traces.write.TraceWriteStrategy;
import org.datadog.jenkins.plugins.datadog.traces.write.TraceWriteStrategyImpl;
@@ -258,65 +258,34 @@ public boolean event(DatadogEvent event) {
}

@Override
public boolean incrementCounter(String name, String hostname, Map<String, Set<String>> tags) {
if(this.defaultIntakeConnectionBroken){
logger.severe("Your client is not initialized properly");
return false;
}
ConcurrentMetricCounters.getInstance().increment(name, hostname, tags);
return true;
}

@Override
public void flushCounters() {
ConcurrentMap<CounterMetric, Integer> counters = ConcurrentMetricCounters.getInstance().getAndReset();

logger.fine("Run flushCounters method");
try (HttpMetrics metrics = metrics()) {
// Submit all metrics as gauge
for (Map.Entry<CounterMetric, Integer> entry : counters.entrySet()) {
CounterMetric counterMetric = entry.getKey();
int count = entry.getValue();
logger.fine("Flushing: " + counterMetric.getMetricName() + " - " + count);

metrics.rate(
counterMetric.getMetricName(), count,
counterMetric.getHostname(),
counterMetric.getTags());
}
} catch (Exception e) {
DatadogUtilities.severe(logger, e, "Failed to flush counters");
}
public MetricsClient metrics() {
return new ApiMetrics();
}

@Override
public HttpMetrics metrics() {
return new HttpMetrics();
}

private final class HttpMetrics implements Metrics {
private final class ApiMetrics implements MetricsClient {
// when we submit a rate we need to divide the submitted value by the interval (10)
private static final int RATE_INTERVAL = 10;

private final JSONArray series = new JSONArray();
private final long timestamp = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());

@Override
public void gauge(String name, long value, String hostname, Map<String, Set<String>> tags) {
public void gauge(String name, double value, String hostname, Map<String, Set<String>> tags) {
addMetric(name, value, hostname, tags, "gauge");
}

public void rate(String name, float value, String hostname, Map<String, Set<String>> tags) {
@Override
public void rate(String name, double value, String hostname, Map<String, Set<String>> tags) {
addMetric(name, value, hostname, tags, "rate");
}

private void addMetric(String name, float value, String hostname, Map<String, Set<String>> tags, String type) {
private void addMetric(String name, double value, String hostname, Map<String, Set<String>> tags, String type) {
logger.fine(String.format("Sending metric '%s' with value %s", name, value));

JSONArray point = new JSONArray();
point.add(timestamp);
if (type.equals("rate")) {
point.add(value / (float) RATE_INTERVAL);
point.add(value / RATE_INTERVAL);
} else {
point.add(value);
}
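To make the rate normalization in ApiMetrics concrete: a value submitted via rate() is divided by the 10-second RATE_INTERVAL so the API receives a per-second rate, while gauge values are added to the series point unchanged. A minimal sketch of that arithmetic, using the same json-lib JSONArray as the plugin (variable names are illustrative):

import java.util.concurrent.TimeUnit;
import net.sf.json.JSONArray;

// Sketch of how a single rate point is built, assuming RATE_INTERVAL = 10 as in the diff.
public class RatePointSketch {
    public static void main(String[] args) {
        long timestamp = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
        double submitted = 30.0;            // e.g. 30 events observed during the 10s flush window
        double perSecond = submitted / 10;  // mirrors RATE_INTERVAL above -> 3.0 per second

        JSONArray point = new JSONArray();
        point.add(timestamp);   // first element: epoch seconds
        point.add(perSecond);   // second element: normalized rate (a gauge would be added as-is)
        System.out.println(point);
    }
}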

This file was deleted.


