-
Notifications
You must be signed in to change notification settings - Fork 1
/
test.py
106 lines (94 loc) · 3.46 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
#!/usr/bin/env python
import unittest
from copy import copy
from tempfile import NamedTemporaryFile
import pandas as pd
from kit_energy_evaluator import KITEnergyEvaluatror
class TestKitEnergyEvaluator(unittest.TestCase):
    """
    Unit tests for the KIT energy evaluator (MAPE-based scoring).

    Each test derives a prediction DataFrame from GROUND_TRUTH, writes both
    to temporary CSV files, and runs the evaluator against them via
    run_scenario().
    """

    # Reference sensor readings: used as the ground truth in every scenario
    # and as the template from which (possibly malformed) predictions are built.
    GROUND_TRUTH = pd.DataFrame({
        'sensor': [1, 1, 1, 1, 1],
        'ts': [pd.Timestamp('2018-01-01T00:00:00.000+01:00'),
               pd.Timestamp('2018-01-01T00:00:15.000+01:00'),
               pd.Timestamp('2018-01-01T00:00:30.000+01:00'),
               pd.Timestamp('2018-01-01T00:45:00.000+01:00'),
               pd.Timestamp('2018-01-01T01:00:00.000+01:00')],
        'value': [0.5, 0.6, 0.7, 0.8, 0.9]
    })

    def test_wrong_columns(self):
        """
        A prediction file with wrong column names must be rejected.
        """
        prediction = copy(self.GROUND_TRUTH)
        prediction.columns = ['sensor', 'ts', 'foo']
        # assertRaises replaces the manual try/except/raise pattern: it fails
        # the test automatically if no exception is raised.
        with self.assertRaises(Exception):
            self.run_scenario(prediction)

    def test_wrong_length(self):
        """
        A prediction file with a missing row must be rejected.
        """
        prediction = copy(self.GROUND_TRUTH)
        prediction = prediction.drop(0)
        with self.assertRaises(Exception):
            self.run_scenario(prediction)

    def test_wrong_order(self):
        """
        A prediction file whose rows are out of order must be rejected.
        """
        prediction = copy(self.GROUND_TRUTH)
        prediction = prediction.sort_values('value', ascending=False)
        with self.assertRaises(Exception):
            self.run_scenario(prediction)

    def test_correct_submission(self):
        """
        A well-formed prediction with one mispredicted value must score 20
        (MAPE in percent: only the first of five values is off by 100%).
        """
        prediction = copy(self.GROUND_TRUTH)
        prediction.value = [1.0, .6, .7, .8, .9]
        result = self.run_scenario(prediction)
        self.assertIsInstance(result, dict)
        self.assertSetEqual(set(result.keys()), {'score', 'score_secondary'})
        self.assertAlmostEqual(result['score'], 20.)

    def test_perfect_submission(self):
        """
        A prediction identical to the ground truth must score 0.
        """
        prediction = copy(self.GROUND_TRUTH)
        result = self.run_scenario(prediction)
        self.assertIsInstance(result, dict)
        self.assertSetEqual(set(result.keys()), {'score', 'score_secondary'})
        self.assertAlmostEqual(result['score'], .0)

    def run_scenario(self, prediction):
        """
        Evaluate *prediction* against GROUND_TRUTH through the evaluator.

        Both DataFrames are written to temporary CSV files, then the
        evaluator is invoked with the prediction's path.

        :param prediction: DataFrame to submit as the prediction
        :return: result dict produced by the evaluator
        :raises Exception: whatever the evaluator raises for malformed input
        """
        # Context managers guarantee both temp files are removed even when
        # _evaluate raises — the path exercised by the three negative tests.
        # NOTE(review): reopening by .name works on POSIX but not on Windows,
        # where the file is held open exclusively — confirm target platform.
        with NamedTemporaryFile() as prediction_tmp_file, \
                NamedTemporaryFile() as ground_truth_tmp_file:
            prediction.to_csv(prediction_tmp_file.name, index=False)
            self.GROUND_TRUTH.to_csv(ground_truth_tmp_file.name, index=False)
            client_payload = {'predicted_data_path': prediction_tmp_file.name}
            evaluator = KITEnergyEvaluatror(ground_truth_tmp_file.name)
            return evaluator._evaluate(client_payload)
# Allow running this test module directly (discovers and runs all TestCase
# methods defined above).
if __name__ == '__main__':
    unittest.main()