# evaluation.py (forked from xuzr/multiclassify-eval)

import numpy as np


class Evaluations:
    """Per-class and micro-averaged confusion-matrix metrics for a
    single-label multiclass classifier."""

    def __init__(self, pred, gt, classes):
        # Accept plain sequences as well as numpy arrays.
        if not isinstance(pred, np.ndarray):
            pred = np.array(pred)
        if not isinstance(gt, np.ndarray):
            gt = np.array(gt)
        self.tp = 0
        self.fn = 0
        self.fp = 0
        self.tn = 0
        self.classes = classes
        for index_, class_ in enumerate(classes):
            # One-vs-rest confusion counts for this class.
            tp_ = ((pred == index_) & (gt == index_)).sum()
            self.tp += tp_
            fn_ = ((pred != index_) & (gt == index_)).sum()
            self.fn += fn_
            fp_ = ((pred == index_) & (gt != index_)).sum()
            self.fp += fp_
            tn_ = ((pred != index_) & (gt != index_)).sum()
            self.tn += tn_
            # Each class name becomes an attribute holding its own Evaluation.
            setattr(self, class_, Evaluation(tp_, fn_, fp_, tn_))
        # Micro average over all classes: in single-label data every error is
        # one class's fp and another class's fn, so the summed fp equals the
        # summed fn and micro precision == recall == f1.
        setattr(self, 'average', Evaluation(self.tp, self.fn, self.fp, self.tn))
    def __repr__(self):
        # Render a metrics table: one column for the micro average, then one per class.
        splitline_str = '*' * 230
        columns = [('Aveg', getattr(self, 'average'))]
        columns += [(c, getattr(self, c)) for c in self.classes]
        classesline_str = ' ' * 15 + ' '.join('|{}|'.format(name) for name, _ in columns)
        preline_str = 'precision: \t' + ' '.join('{:0.4f}'.format(e.precision()) for _, e in columns)
        recline_str = 'recall:    \t' + ' '.join('{:0.4f}'.format(e.recall()) for _, e in columns)
        acurline_str = 'accuracy:  \t' + ' '.join('{:0.4f}'.format(e.accuracy()) for _, e in columns)
        f1score_str = 'f1_score:  \t' + ' '.join('{:0.4f}'.format(e.f1_score()) for _, e in columns)
        return '\n'.join([splitline_str, classesline_str, preline_str,
                          recline_str, acurline_str, f1score_str, splitline_str])

    def __dir__(self):
        # Expose only the metric holders: the micro average plus one entry per class.
        dir_ = ['average']
        dir_.extend(self.classes)
        return dir_
    def writelog(self, writer, key='average', path='', global_step=None):
        """Write precision/recall/accuracy/f1 scalars via writer.add_scalar,
        e.g. to a TensorBoard SummaryWriter."""
        def _log(name):
            eval_ = getattr(self, name)
            writer.add_scalar(path + '/{}/precision'.format(name), eval_.precision(), global_step=global_step)
            writer.add_scalar(path + '/{}/recall'.format(name), eval_.recall(), global_step=global_step)
            writer.add_scalar(path + '/{}/accuracy'.format(name), eval_.accuracy(), global_step=global_step)
            writer.add_scalar(path + '/{}/f1_score'.format(name), eval_.f1_score(), global_step=global_step)

        if key in dir(self):
            # A specific class name, or 'average'.
            _log(key)
        elif key == 'ALL':
            # Log the micro average and every per-class entry.
            for attr_ in dir(self):
                _log(attr_)
        else:
            # Unrecognized key: fall back to the micro average.
            _log('average')
class Evaluation:
    """Binary (one-vs-rest) confusion counts and the metrics derived from them."""

    def __init__(self, tp, fn, fp, tn):
        self.tp = tp
        self.fn = fn
        self.fp = fp
        self.tn = tn

    def precision(self):
        # Defined as 0.0 when the class is never predicted (tp + fp == 0).
        denom = self.tp + self.fp
        return self.tp / denom if denom else 0.0

    def recall(self):
        # Defined as 0.0 when the class never occurs in the ground truth (tp + fn == 0).
        denom = self.tp + self.fn
        return self.tp / denom if denom else 0.0

    def accuracy(self):
        return (self.tp + self.tn) / (self.tp + self.tn + self.fp + self.fn)

    def f1_score(self):
        # Harmonic mean of precision and recall, computed directly from the counts.
        denom = 2 * self.tp + self.fn + self.fp
        return 2 * self.tp / denom if denom else 0.0
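

# Minimal usage sketch (hypothetical data, not part of the original module).
# Labels are integer indices into `classes`, matching the index comparisons
# in Evaluations.__init__.
if __name__ == '__main__':
    classes = ['cat', 'dog', 'bird']
    gt = [0, 1, 2, 1, 0, 2, 1]    # ground-truth class indices
    pred = [0, 1, 1, 1, 0, 2, 0]  # predicted class indices

    evals = Evaluations(pred, gt, classes)
    print(evals)                     # metrics table: Aveg column plus one per class
    print(evals.average.f1_score())  # micro-averaged F1
    print(evals.dog.precision())     # per-class metrics via the class-name attribute

    # Logging sketch, assuming a TensorBoard SummaryWriter (needs the torch
    # package; add_scalar is the only writer method writelog calls):
    # from torch.utils.tensorboard import SummaryWriter
    # writer = SummaryWriter('runs/eval-demo')  # hypothetical log directory
    # evals.writelog(writer, key='ALL', path='val', global_step=0)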