# calib.py — post-hoc probability calibration methods:
# iso (per-class isotonic regression), EMP (empirical/histogram binning),
# MAT (matrix scaling via logistic regression on log-ratios),
# BETA (per-class beta calibration).
import numpy
from sklearn import isotonic
from sklearn import linear_model
class iso:
    """Per-class isotonic-regression calibrator.

    Fits one sklearn ``IsotonicRegression`` per score column of ``s``.
    For binary input (k == 2) only the first column is fitted and the
    second is derived as its complement.
    """

    def __init__(self):
        # One fitted IsotonicRegression per calibrated column.
        self.model_list = []

    def fit(self, s, y):
        """Fit isotonic models on scores ``s`` (n, k) against targets ``y`` (n, k).

        Resets any previously fitted models so repeated ``fit()`` calls do
        not accumulate stale entries (bug fix: the original appended to
        ``model_list`` without clearing it, misaligning ``predict_proba``).
        """
        self.model_list = []
        k = numpy.shape(s)[1]
        if k == 2:
            # Binary case: calibrate only the positive-class column.
            k = 1
        for i in range(k):
            mdl = isotonic.IsotonicRegression(y_min=0.0, y_max=1.0, out_of_bounds='clip')
            mdl.fit(s[:, i], y[:, i])
            self.model_list.append(mdl)

    def predict_proba(self, s):
        """Return calibrated probabilities, renormalized so each row sums to 1."""
        k = numpy.shape(s)[1]
        if k == 2:
            k = 1
        s_hat = numpy.zeros_like(s)
        for i in range(k):
            s_hat[:, i] = self.model_list[i].predict(s[:, i])
        if k == 1:
            # Binary: second column is the complement of the first.
            s_hat[:, 1] = 1 - s_hat[:, 0]
        return s_hat / numpy.sum(s_hat, axis=1).reshape(-1, 1)
class EMP:
    """Empirical (histogram) binning calibrator.

    Splits [0, 1] into ``n_bins`` equal-width bins and, per score column,
    maps every bin to the mean target value observed in that bin at fit
    time.  Prediction looks up each score's bin mean and renormalizes
    each row to sum to 1.
    """

    def __init__(self, n_bins=10):
        self.n_bins = n_bins
        self.a = None  # (n_bins, k) table of per-bin mean targets

    def fit(self, s, y):
        """Estimate the per-bin mean of ``y`` for every column of ``s``."""
        n_cols = numpy.shape(s)[1]
        self.a = numpy.zeros((self.n_bins, n_cols))
        edges = numpy.linspace(0, 1, self.n_bins + 1)
        lo = edges[:-1].reshape(-1, 1)
        hi = edges[1:].reshape(-1, 1)
        for col in range(n_cols):
            # membership[b, j] is True when s[j, col] lies in bin b
            # (inclusive on both edges, matching the original logic).
            membership = (s[:, col] >= lo) & (s[:, col] <= hi)
            hits = numpy.sum(membership * y[:, col].reshape(1, -1), axis=1)
            counts = numpy.sum(membership, axis=1)
            # Empty bins divide 0/0 and yield NaN; predict_proba zeroes them.
            self.a[:, col] = hits / counts

    def predict_proba(self, s):
        """Map each score to its bin mean, then renormalize rows to sum to 1."""
        n_cols = numpy.shape(s)[1]
        s_hat = numpy.zeros_like(s)
        edges = numpy.linspace(0, 1, self.n_bins + 1)
        lo = edges[:-1].reshape(-1, 1)
        hi = edges[1:].reshape(-1, 1)
        for col in range(n_cols):
            membership = (s[:, col] >= lo) & (s[:, col] <= hi)
            weighted = membership * self.a[:, col].reshape(-1, 1)
            # NaNs come from bins that were empty during fit; treat as zero.
            weighted[numpy.isnan(weighted)] = 0.0
            s_hat[:, col] = numpy.sum(weighted, axis=0)
        return s_hat / numpy.sum(s_hat, axis=1).reshape(-1, 1)
class MAT:
    """Matrix scaling calibrator.

    Fits a multinomial logistic regression on the log-ratio of each score
    column against the last column.
    """

    def __init__(self):
        self.mdl = None  # fitted sklearn LogisticRegression

    @staticmethod
    def _log_ratio(s):
        """Return ``log(s[:, :-1] / s[:, -1])`` without mutating the input.

        The last column is floored at 1e-16 on a *copy* so the log stays
        finite (bug fix: the original clamped the caller's array in place,
        a surprising side effect on shared data).
        """
        s = numpy.array(s, dtype=float, copy=True)
        s[s[:, -1] <= 1e-16, -1] = 1e-16
        return numpy.log(s[:, :-1] / s[:, -1].reshape(-1, 1))

    def fit(self, s, y):
        """Fit a logistic regression mapping log-ratio scores to labels ``y``."""
        self.mdl = linear_model.LogisticRegression()
        self.mdl.fit(self._log_ratio(s), y)

    def predict_proba(self, s):
        """Return calibrated class probabilities for scores ``s``."""
        return self.mdl.predict_proba(self._log_ratio(s))
class BETA:
    """Per-class beta calibration.

    Fits one logistic regression per score column on the design matrix
    ``[log(s), log(1 - s)]``.  For binary input (k == 2) only the first
    column is fitted and the second is derived as its complement.
    """

    def __init__(self):
        # One fitted LogisticRegression per calibrated column.
        self.model_list = []

    @staticmethod
    def _features(col):
        """Stack ``[log(col), log(1 - col)]`` into an (n, 2) design matrix.

        NOTE(review): values of exactly 0 or 1 produce -inf here, as in the
        original; scores are presumably strictly inside (0, 1) — confirm
        against callers.
        """
        return numpy.hstack([numpy.log(col).reshape(-1, 1),
                             numpy.log(1 - col).reshape(-1, 1)])

    def fit(self, s, y):
        """Fit one beta-calibration model per column of ``s`` against ``y``.

        Resets previously fitted models so repeated ``fit()`` calls do not
        accumulate stale entries (bug fix: the original only appended,
        misaligning ``predict_proba`` after a refit).  ``y[:, i]`` is passed
        as 1-D, which sklearn expects (the original's column vector was
        ravelled internally with a warning).
        """
        self.model_list = []
        k = numpy.shape(s)[1]
        if k == 2:
            # Binary case: calibrate only the first column.
            k = 1
        for i in range(k):
            # C=1e16 makes the regression effectively unregularized.
            mdl = linear_model.LogisticRegression(C=1e16)
            mdl.fit(self._features(s[:, i]), y[:, i])
            self.model_list.append(mdl)

    def predict_proba(self, s):
        """Return calibrated probabilities, renormalized so each row sums to 1."""
        k = numpy.shape(s)[1]
        if k == 2:
            k = 1
        s_hat = numpy.zeros_like(s)
        for i in range(k):
            s_hat[:, i] = self.model_list[i].predict_proba(self._features(s[:, i]))[:, 1]
        if k == 1:
            # Binary: second column is the complement of the first.
            s_hat[:, 1] = 1 - s_hat[:, 0]
        return s_hat / numpy.sum(s_hat, axis=1).reshape(-1, 1)