Skip to content

Commit

Permalink
Improve fuzzer outcome metrics (#4338)
Browse files Browse the repository at this point in the history
### Motivation

As part of the initiative to improve clusterfuzz monitoring, it is
necessary to enrich some metrics regarding fuzzing outcomes.

This PR adds the platform field to the following metrics:

- FUZZER_KNOWN_CRASH_COUNT
- FUZZER_NEW_CRASH_COUNT
- FUZZER_RETURN_CODE_COUNT
- FUZZER_TOTAL_FUZZ_TIME
- JOB_KNOWN_CRASH_COUNT
- JOB_TOTAL_FUZZ_TIME
- JOB_NEW_CRASH_COUNT

It also adds a `job` field (populated from the job type) to the FUZZER_RETURN_CODE_COUNT metric.

Part of #4271
  • Loading branch information
vitorguidi authored Oct 29, 2024
1 parent 42ac6d9 commit d61eb37
Show file tree
Hide file tree
Showing 3 changed files with 76 additions and 35 deletions.
34 changes: 23 additions & 11 deletions src/clusterfuzz/_internal/bot/tasks/utasks/fuzz_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,16 +414,18 @@ def __exit__(self, exc_type, value, traceback):
monitoring_metrics.FUZZER_TOTAL_FUZZ_TIME.increment_by(
int(duration), {
'fuzzer': self.fuzzer_name,
'timeout': self.timeout
'timeout': self.timeout,
'platform': environment.platform(),
})
monitoring_metrics.JOB_TOTAL_FUZZ_TIME.increment_by(
int(duration), {
'job': self.job_type,
'timeout': self.timeout
'timeout': self.timeout,
'platform': environment.platform(),
})


def _track_fuzzer_run_result(fuzzer_name, generated_testcase_count,
def _track_fuzzer_run_result(fuzzer_name, job_type, generated_testcase_count,
expected_testcase_count, return_code):
"""Track fuzzer run result"""
if expected_testcase_count > 0:
Expand All @@ -444,6 +446,8 @@ def clamp(val, minimum, maximum):
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.increment({
'fuzzer': fuzzer_name,
'return_code': return_code,
'platform': environment.platform(),
'job': job_type,
})


Expand All @@ -462,16 +466,20 @@ def _track_testcase_run_result(fuzzer, job_type, new_crash_count,
monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT.increment_by(
known_crash_count, {
'fuzzer': fuzzer,
'platform': environment.platform(),
})
monitoring_metrics.FUZZER_NEW_CRASH_COUNT.increment_by(
new_crash_count, {
'fuzzer': fuzzer,
'platform': environment.platform(),
})
monitoring_metrics.JOB_KNOWN_CRASH_COUNT.increment_by(known_crash_count, {
'job': job_type,
'platform': environment.platform(),
})
monitoring_metrics.JOB_NEW_CRASH_COUNT.increment_by(new_crash_count, {
'job': job_type,
'platform': environment.platform()
})


Expand Down Expand Up @@ -1340,8 +1348,9 @@ def sync_new_corpus_files(self):

self.gcs_corpus.upload_files(filtered_new_files)

def generate_blackbox_testcases(self, fuzzer, fuzzer_directory, testcase_count
) -> GenerateBlackboxTestcasesResult:
def generate_blackbox_testcases(
self, fuzzer, job_type, fuzzer_directory,
testcase_count) -> GenerateBlackboxTestcasesResult:
"""Run the blackbox fuzzer and generate testcases."""
# Helper variables.
fuzzer_name = fuzzer.name
Expand Down Expand Up @@ -1463,7 +1472,7 @@ def generate_blackbox_testcases(self, fuzzer, fuzzer_directory, testcase_count
if fuzzer_run_results:
self.fuzz_task_output.fuzzer_run_results.CopyFrom(fuzzer_run_results)

_track_fuzzer_run_result(fuzzer_name, generated_testcase_count,
_track_fuzzer_run_result(fuzzer_name, job_type, generated_testcase_count,
testcase_count, fuzzer_return_code)

# Make sure that there are testcases generated. If not, set the error flag.
Expand Down Expand Up @@ -1570,8 +1579,8 @@ def do_blackbox_fuzzing(self, fuzzer, fuzzer_directory, job_type):

# Run the fuzzer to generate testcases. If error occurred while trying
# to run the fuzzer, bail out.
generate_result = self.generate_blackbox_testcases(fuzzer, fuzzer_directory,
testcase_count)
generate_result = self.generate_blackbox_testcases(
fuzzer, job_type, fuzzer_directory, testcase_count)
if not generate_result.success:
return None, None, None, None

Expand Down Expand Up @@ -1902,17 +1911,20 @@ def _upload_testcase_run_jsons(testcase_run_jsons):


def handle_fuzz_build_setup_failure(output):
_track_fuzzer_run_result(output.uworker_input.fuzzer_name, 0, 0,
_track_fuzzer_run_result(output.uworker_input.fuzzer_name,
output.uworker_input.job_type, 0, 0,
FuzzErrorCode.BUILD_SETUP_FAILED)


def handle_fuzz_data_bundle_setup_failure(output):
_track_fuzzer_run_result(output.uworker_input.fuzzer_name, 0, 0,
_track_fuzzer_run_result(output.uworker_input.fuzzer_name,
output.uworker_input.job_type, 0, 0,
FuzzErrorCode.DATA_BUNDLE_SETUP_FAILED)


def handle_fuzz_no_fuzzer(output):
_track_fuzzer_run_result(output.uworker_input.fuzzer_name, 0, 0,
_track_fuzzer_run_result(output.uworker_input.fuzzer_name,
output.uworker_input.job_type, 0, 0,
FuzzErrorCode.FUZZER_SETUP_FAILED)


Expand Down
8 changes: 8 additions & 0 deletions src/clusterfuzz/_internal/metrics/monitoring_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
'(grouped by fuzzer)'),
field_spec=[
monitor.StringField('fuzzer'),
monitor.StringField('platform'),
])

FUZZER_NEW_CRASH_COUNT = monitor.CounterMetric(
Expand All @@ -46,6 +47,7 @@
'(grouped by fuzzer)'),
field_spec=[
monitor.StringField('fuzzer'),
monitor.StringField('platform'),
])

JOB_KNOWN_CRASH_COUNT = monitor.CounterMetric(
Expand All @@ -54,6 +56,7 @@
'(grouped by job)'),
field_spec=[
monitor.StringField('job'),
monitor.StringField('platform'),
])

JOB_NEW_CRASH_COUNT = monitor.CounterMetric(
Expand All @@ -62,6 +65,7 @@
'(grouped by job)'),
field_spec=[
monitor.StringField('job'),
monitor.StringField('platform'),
])

FUZZER_RETURN_CODE_COUNT = monitor.CounterMetric(
Expand All @@ -71,6 +75,8 @@
field_spec=[
monitor.StringField('fuzzer'),
monitor.IntegerField('return_code'),
monitor.StringField('platform'),
monitor.StringField('job'),
],
)

Expand All @@ -81,6 +87,7 @@
field_spec=[
monitor.StringField('fuzzer'),
monitor.BooleanField('timeout'),
monitor.StringField('platform'),
],
)

Expand All @@ -91,6 +98,7 @@
field_spec=[
monitor.StringField('job'),
monitor.BooleanField('timeout'),
monitor.StringField('platform'),
],
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,38 +55,46 @@ class TrackFuzzerRunResultTest(unittest.TestCase):

def setUp(self):
monitor.metrics_store().reset_for_testing()
helpers.patch(self, ['clusterfuzz._internal.system.environment.platform'])
self.mock.platform.return_value = 'some_platform'

def test_fuzzer_run_result(self):
"""Ensure _track_fuzzer_run_result set the right metrics."""
fuzz_task._track_fuzzer_run_result('name', 10, 100, 2)
fuzz_task._track_fuzzer_run_result('name', 100, 200, 2)
fuzz_task._track_fuzzer_run_result('name', 1000, 2000, 2)
fuzz_task._track_fuzzer_run_result('name', 1000, 500, 0)
fuzz_task._track_fuzzer_run_result('name', 0, 1000, -1)
fuzz_task._track_fuzzer_run_result('name', 0, 0, 2)
fuzz_task._track_fuzzer_run_result('fuzzer', 'job', 10, 100, 2)
fuzz_task._track_fuzzer_run_result('fuzzer', 'job', 100, 200, 2)
fuzz_task._track_fuzzer_run_result('fuzzer', 'job', 1000, 2000, 2)
fuzz_task._track_fuzzer_run_result('fuzzer', 'job', 1000, 500, 0)
fuzz_task._track_fuzzer_run_result('fuzzer', 'job', 0, 1000, -1)
fuzz_task._track_fuzzer_run_result('fuzzer', 'job', 0, 0, 2)

self.assertEqual(
4,
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
'fuzzer': 'name',
'return_code': 2
'fuzzer': 'fuzzer',
'return_code': 2,
'platform': 'some_platform',
'job': 'job',
}))
self.assertEqual(
1,
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
'fuzzer': 'name',
'return_code': 0
'fuzzer': 'fuzzer',
'return_code': 0,
'platform': 'some_platform',
'job': 'job',
}))
self.assertEqual(
1,
monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
'fuzzer': 'name',
'return_code': -1
'fuzzer': 'fuzzer',
'return_code': -1,
'platform': 'some_platform',
'job': 'job',
}))

testcase_count_ratio = (
monitoring_metrics.FUZZER_TESTCASE_COUNT_RATIO.get({
'fuzzer': 'name'
'fuzzer': 'fuzzer'
}))
self.assertEqual(3.1, testcase_count_ratio.sum)
self.assertEqual(5, testcase_count_ratio.count)
Expand Down Expand Up @@ -130,27 +138,37 @@ class TrackTestcaseRunResultTest(unittest.TestCase):

def setUp(self):
monitor.metrics_store().reset_for_testing()
helpers.patch(self, ['clusterfuzz._internal.system.environment.platform'])
self.mock.platform.return_value = 'some_platform'

def test_testcase_run_result(self):
"""Ensure _track_testcase_run_result sets the right metrics."""
fuzz_task._track_testcase_run_result('fuzzer', 'job', 2, 5)
fuzz_task._track_testcase_run_result('fuzzer', 'job', 5, 10)

self.assertEqual(7,
monitoring_metrics.JOB_NEW_CRASH_COUNT.get({
'job': 'job'
}))
self.assertEqual(
15, monitoring_metrics.JOB_KNOWN_CRASH_COUNT.get({
'job': 'job'
7,
monitoring_metrics.JOB_NEW_CRASH_COUNT.get({
'job': 'job',
'platform': 'some_platform',
}))
self.assertEqual(
7, monitoring_metrics.FUZZER_NEW_CRASH_COUNT.get({
'fuzzer': 'fuzzer'
15,
monitoring_metrics.JOB_KNOWN_CRASH_COUNT.get({
'job': 'job',
'platform': 'some_platform',
}))
self.assertEqual(
15, monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT.get({
'fuzzer': 'fuzzer'
7,
monitoring_metrics.FUZZER_NEW_CRASH_COUNT.get({
'fuzzer': 'fuzzer',
'platform': 'some_platform',
}))
self.assertEqual(
15,
monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT.get({
'fuzzer': 'fuzzer',
'platform': 'some_platform',
}))


Expand Down Expand Up @@ -180,6 +198,8 @@ class TrackFuzzTimeTest(unittest.TestCase):

def setUp(self):
monitor.metrics_store().reset_for_testing()
helpers.patch(self, ['clusterfuzz._internal.system.environment.platform'])
self.mock.platform.return_value = 'some_platform'

def _test(self, timeout):
"""Test helper."""
Expand All @@ -190,7 +210,8 @@ def _test(self, timeout):

fuzzer_total_time = monitoring_metrics.FUZZER_TOTAL_FUZZ_TIME.get({
'fuzzer': 'fuzzer',
'timeout': timeout
'timeout': timeout,
'platform': 'some_platform',
})
self.assertEqual(5, fuzzer_total_time)

Expand Down

0 comments on commit d61eb37

Please sign in to comment.