model.py
# import libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import argparse
from utils.utils import Remove_correlateds
from utils import confusion_matrix as plot_cm
from tqdm import tqdm


def main():
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--threshold", type=float, default=0.9)
    parser.add_argument("--k_features", type=str, default="parsimonious")
    parser.add_argument("--path_to_data", type=str, default="/home/timlatypov/PhD/ModelZoo/MSTN_DATA/ml_dataframe.csv")
    parser.add_argument("--kernel", type=str, default="linear")
    parser.add_argument("--C", type=float, default=0.01)
    parser.add_argument("--n_splits", type=int, default=10)
    args = parser.parse_args()
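
    # Example invocation (illustrative only; the data path is hypothetical and would
    # normally point at your own copy of the dataframe):
    #   python model.py --threshold 0.9 --k_features parsimonious \
    #       --kernel linear --C 0.01 --n_splits 10 \
    #       --path_to_data /path/to/ml_dataframe.csv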
    # create dataframes
    ml_dataframe = pd.read_csv(args.path_to_data)
    ml_dataframe = ml_dataframe.drop(columns=['id', 'eTIV'])
    ml_dataframe['diagnosis'] = [1 if dx == "MS-TN" else 0 for dx in ml_dataframe['diagnosis']]
    print(ml_dataframe.shape)

    # drop subjects with missing data
    ml_dataframe = ml_dataframe.dropna(axis=0)
    print(ml_dataframe.shape)

    # binary diagnosis labels (1 = MS-TN, 0 = MS)
    condition = np.array(ml_dataframe["diagnosis"])

    # drop demographic/clinical columns, keeping only the imaging features
    gm_data = ml_dataframe.drop(columns=['age', 'sex', 'diagnosis', 'duration_of_ms', 'duration_of_pain', 'side_of_pain', 'edss'])
    # shuffle column order with a fixed seed so the downstream feature removal is reproducible
    gm_data = gm_data[np.random.default_rng(seed=42).permutation(gm_data.columns.values)]

    # outer cross-validation splitter (stratified by diagnosis)
    kf = StratifiedKFold(n_splits=args.n_splits, shuffle=True, random_state=20)
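
    # Remove_correlateds is a project-local helper (utils.utils). From its use here it is
    # assumed to drop one feature from each highly correlated pair (pairwise correlation
    # above --threshold) and to return the reduced dataframe.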
    rc = Remove_correlateds(threshold=args.threshold)
    x = rc.fit(gm_data)
    print(x.shape)
    gm_cols = list(x.columns)
    x = np.array(x)
    y = condition

    # containers for per-fold results
    features = []
    train_acc = []
    test_acc = []
    model_proba = None
    y_true = None
    model_pred = None
    fold_number = None

    # nested cross-validation: the outer loop holds out a test fold,
    # the inner loop is used only for feature selection
    progress_bar = tqdm(enumerate(kf.split(x, y)), desc='Progress', total=kf.get_n_splits(x, y), ascii=" ▖▘▝▗▚▞█", colour="#42cbf5")
    for fold, (nest_index, test_index) in progress_bar:
        x_nest, x_test = x[nest_index], x[test_index]
        y_nest, y_test = y[nest_index], y[test_index]

        # inner cross-validation on the nested (training) portion
        kf2 = StratifiedKFold(n_splits=args.n_splits - 1, shuffle=True, random_state=20)
        split = kf2.split(x_nest, y_nest)

        # backward sequential feature selection wrapped around a scaled SVM
        model = Pipeline([("scaler", StandardScaler()), ("svm", SVC(kernel=args.kernel, C=args.C, probability=True))])
        featureselector = SFS(model, k_features=args.k_features, forward=False, floating=False, scoring="accuracy", cv=list(split), n_jobs=20, verbose=0)
        featureselector.fit(x_nest, y_nest)
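
        # Note: with mlxtend's SFS, k_features="parsimonious" selects the smallest feature
        # subset whose cross-validated score is within one standard error of the best subset.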
        # record the names of the features selected in this fold
        fold_features = []
        for i in featureselector.k_feature_idx_:
            features.append(gm_cols[i])
            fold_features.append(gm_cols[i])

        # reduce the train and test data to the selected features
        x_nest = featureselector.transform(x_nest)
        x_test = featureselector.transform(x_test)

        # refit the pipeline on the selected features only and keep the SVM weights
        final_model = Pipeline([("scaler", StandardScaler()), ("svm", SVC(kernel=args.kernel, C=args.C, probability=True))])
        final_model.fit(x_nest, y_nest)
        model_feature_weights = final_model['svm'].coef_
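
        # coef_ above is only defined for the linear kernel. For non-linear kernels, a rough
        # alternative (illustrative sketch, not part of the original pipeline) would be
        # permutation importance on the held-out fold:
        #   from sklearn.inspection import permutation_importance
        #   imp = permutation_importance(final_model, x_test, y_test, n_repeats=10, random_state=20)
        #   model_feature_weights = imp.importances_mean.reshape(1, -1)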
        y_pred_train = final_model.predict(x_nest)
        y_pred_test = final_model.predict(x_test)
        acc_train = accuracy_score(y_nest, y_pred_train)
        acc_test = accuracy_score(y_test, y_pred_test)
        test_acc.append(acc_test)
        train_acc.append(acc_train)

        # accumulate predictions and probabilities across folds (for the confusion matrix)
        if model_proba is None:
            model_proba = final_model.predict_proba(x_test)
            y_true = y_test
            model_pred = y_pred_test
            roc_auc = roc_auc_score(y_true, model_proba[:, 1])
            fold_number = np.repeat(fold, len(y_test))
        else:
            model_proba = np.append(model_proba, final_model.predict_proba(x_test), axis=0)
            y_true = np.append(y_true, y_test, axis=0)
            model_pred = np.append(model_pred, y_pred_test, axis=0)
            # ROC AUC over all test predictions accumulated so far
            roc_auc = np.append(roc_auc, roc_auc_score(y_true, model_proba[:, 1]))
            fold_number = np.append(fold_number, np.repeat(fold, len(y_test)), axis=0)

        # update the progress bar with running statistics
        progress_bar.set_postfix({'Train mean': np.mean(train_acc), 'std': np.std(train_acc),
                                  'Test mean': np.mean(test_acc), 'sd': np.std(test_acc),
                                  'roc_auc': np.mean(roc_auc)})

        # save this fold's feature weights as csv
        feature_weights = pd.DataFrame(model_feature_weights, columns=fold_features)
        feature_weights.to_csv('out/{}_feature_weights.csv'.format(fold))

    feature_set = pd.DataFrame([])
    feature_set['feature'] = features

    # overall results
    print("Mean train accuracy: %.4f" % np.mean(train_acc))
    print("Mean test accuracy: %.4f" % np.mean(test_acc))

    # how often each feature was selected across the outer folds
    feature_set = feature_set.feature.value_counts().to_frame()
    feature_set.to_csv('out/important_features.csv')

    # save per-sample predictions, fold assignments, and class probabilities
    prediction = pd.DataFrame({'y_true': y_true, 'y_pred': model_pred, 'fold': fold_number})
    prediction.to_csv('out/predictions_plotting.csv')
    y_proba = pd.DataFrame(model_proba, columns=['MS', 'MS-TN'])
    y_proba.to_csv('out/y_proba.csv')
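
    # The commented-out plotting below relies on a project-local helper. A quick summary can
    # also be obtained directly from scikit-learn (illustrative sketch using the accumulated
    # arrays above, not part of the original pipeline):
    #   from sklearn.metrics import confusion_matrix
    #   print(confusion_matrix(y_true, model_pred, normalize='true'))
    #   print("Pooled ROC AUC: %.4f" % roc_auc_score(y_true, model_proba[:, 1]))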
    # optional plotting with the project-local confusion_matrix helpers
    #cm = plot_cm.confusion_matrix(y_true, model_pred, normalize='true')
    #plot_cm.plot_confusion_matrix(cm, class_names=['MS', 'MS-TN'], savefig='out/cm.png')
    #plot_cm.plot_roc_auc(y_true, model_proba, class_names=['MS', 'MS-TN'], colors=['#B1C8E7', '#E6BA97'], savefig='out/roc.png')


if __name__ == "__main__":
    main()