-
Notifications
You must be signed in to change notification settings - Fork 0
/
models.py
181 lines (151 loc) · 8.53 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model
import tensorflow_probability as tfp
tfd = tfp.distributions
def create_intervention_model(task, longest_speaker_length, uncertainty):
    """Build a small LSTM network over intervention sequences.

    Input is expected to be shaped (longest_speaker_length, 3) per sample
    (three features per timestep — assumed from the input_shape; confirm
    against the caller).

    Args:
        task: 'classification' (2-way softmax head) or 'regression'.
        longest_speaker_length: number of timesteps in the input sequences.
        uncertainty: for regression only — when truthy, the head emits a
            Normal distribution (loc + softplus-scale) via TFP instead of
            a point estimate clipped to [0, 30].

    Returns:
        An uncompiled tf.keras.Sequential model.
    """
    # Shared trunk: single LSTM encoder followed by a regularized dense layer.
    trunk = [
        layers.LSTM(16, input_shape=(longest_speaker_length, 3)),
        layers.BatchNormalization(),
        layers.Dropout(0.2),
        layers.Dense(16, activation='relu'),
        layers.BatchNormalization(),
    ]

    if task == 'classification':
        head = [layers.Dense(2, activation='softmax')]
    elif task == 'regression':
        if uncertainty:
            # Two outputs: location and (unconstrained) scale logit; the
            # DistributionLambda maps them to a Normal with positive scale.
            head = [
                layers.Dense(16, activation='relu'),
                layers.Dropout(0.2),
                layers.Dense(2),
                tfp.layers.DistributionLambda(
                    lambda t: tfd.Normal(
                        loc=t[..., :1],
                        scale=tf.math.softplus(t[..., 1:]) + 1e-6)),
            ]
        else:
            # Point estimate, clipped to [0, 30] (plausibly a score range —
            # TODO confirm against the dataset).
            head = [
                layers.Dense(16, activation='relu'),
                layers.Dense(8, activation='relu'),
                layers.Dense(1),
                layers.ReLU(max_value=30),
            ]
    else:
        # Unrecognized task: return the trunk only (mirrors original flow).
        head = []

    model = tf.keras.Sequential()
    for layer in trunk + head:
        model.add(layer)
    return model
def create_pause_model(task, n_features, uncertainty):
    """Build a dense (MLP) network over flat pause-feature vectors.

    Args:
        task: 'classification' (2-way softmax head with L2/L1
            regularization) or 'regression'.
        n_features: length of the flat input feature vector.
        uncertainty: for regression only — when truthy, the head emits a
            Normal distribution via TFP instead of a point estimate
            clipped to [0, 30].

    Returns:
        An uncompiled tf.keras.Sequential model.
    """
    # Shared trunk: normalized input followed by four dense blocks, each
    # Dense -> BatchNorm -> Dropout, widening from 16 to 24 units.
    trunk = [layers.Input(shape=(n_features,)), layers.BatchNormalization()]
    for units, drop_rate in ((16, 0.2), (16, 0.2), (24, 0.1), (24, 0.1)):
        trunk.append(layers.Dense(units, activation='relu'))
        trunk.append(layers.BatchNormalization())
        trunk.append(layers.Dropout(drop_rate))

    if task == 'classification':
        head = [
            layers.Dense(
                2, activation='softmax',
                kernel_regularizer=tf.keras.regularizers.l2(0.01),
                activity_regularizer=tf.keras.regularizers.l1(0.01)),
        ]
    elif task == 'regression':
        if uncertainty:
            # Emit loc and scale logit; DistributionLambda builds a Normal
            # with a strictly positive scale via softplus.
            head = [
                layers.Dense(16, activation='relu'),
                layers.Dropout(0.2),
                layers.Dense(2),
                tfp.layers.DistributionLambda(
                    lambda t: tfd.Normal(
                        loc=t[..., :1],
                        scale=tf.math.softplus(t[..., 1:]) + 1e-6)),
            ]
        else:
            # Point estimate clipped to [0, 30].
            head = [
                layers.Dense(16, activation='relu'),
                layers.Dense(16, activation='relu'),
                layers.Dense(1),
                layers.ReLU(max_value=30),
            ]
    else:
        # Unrecognized task: trunk only (mirrors original flow).
        head = []

    model = tf.keras.Sequential()
    for layer in trunk + head:
        model.add(layer)
    return model
def create_compare_model(task, features_size, uncertainty):
    """Build a small, heavily regularized MLP over COMPARE-style features.

    Args:
        task: 'classification' (2-way softmax head) or 'regression'.
        features_size: length of the flat input feature vector.
        uncertainty: for regression only — when truthy, the head emits a
            Normal distribution via TFP instead of a point estimate
            clipped to [0, 30].

    Returns:
        An uncompiled tf.keras.Sequential model.
    """
    def _regularizers():
        # Every regularized layer in this model uses the same L2 kernel /
        # L1 activity penalties; build them fresh per layer.
        return dict(
            kernel_regularizer=tf.keras.regularizers.l2(0.01),
            activity_regularizer=tf.keras.regularizers.l1(0.01))

    model = tf.keras.Sequential()
    model.add(layers.Input(shape=(features_size,)))
    model.add(layers.Dense(24, activation='relu', **_regularizers()))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.2))

    if task == 'classification':
        model.add(layers.Dense(2, activation='softmax', **_regularizers()))
    elif task == 'regression':
        if uncertainty:
            # Emit loc and scale logit; DistributionLambda builds a Normal
            # with a strictly positive scale via softplus.
            model.add(layers.Dense(16, activation='relu'))
            model.add(layers.Dropout(0.2))
            model.add(layers.Dense(2))
            model.add(tfp.layers.DistributionLambda(
                lambda t: tfd.Normal(
                    loc=t[..., :1],
                    scale=tf.math.softplus(t[..., 1:]) + 1e-6)))
        else:
            # Point estimate clipped to [0, 30], with regularization on
            # every dense layer and a strong (0.5) dropout before output.
            model.add(layers.Dense(8, activation='relu', **_regularizers()))
            model.add(layers.Dense(8, activation='relu', **_regularizers()))
            model.add(layers.Dropout(0.5))
            model.add(layers.Dense(1, **_regularizers()))
            model.add(layers.ReLU(max_value=30))
    return model
def create_spectogram_model(spectogram_size):
    """Build a 2-D CNN classifier over spectrogram images.

    Four Conv2D -> BatchNorm -> MaxPool stages (16/32/64/128 filters,
    3x3 then 5x5 kernels), then a dense classification head with a 2-way
    softmax output.

    Args:
        spectogram_size: input shape tuple for the spectrogram tensor
            (presumably (height, width, channels) — confirm with caller).

    Returns:
        An uncompiled tf.keras functional Model.
    """
    inputs = layers.Input(shape=spectogram_size, name='spectrogram_input')
    x = layers.BatchNormalization()(inputs)

    # Conv stack: (filters, square kernel size) per stage.
    for filters, kernel in ((16, 3), (32, 3), (64, 5), (128, 5)):
        x = layers.Conv2D(filters, kernel_size=(kernel, kernel),
                          strides=(1, 1), activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPool2D()(x)

    # Classification head.
    x = layers.Flatten()(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    outputs = layers.Dense(2, activation='softmax')(x)

    return Model(inputs, outputs)
def create_silences_model(task, uncertainty):
    """Build a 1-D CNN over fixed-length (800, 1) silence sequences.

    Four Conv1D -> BatchNorm -> MaxPool stages (16/32/64/128 filters,
    kernel sizes 3/3/5/5), then a dense head selected by `task`.

    Args:
        task: 'classification' (2-way softmax head) or 'regression'.
        uncertainty: for regression only — when truthy, the head emits a
            Normal distribution via TFP instead of a point estimate
            clipped to [0, 30].

    Returns:
        An uncompiled tf.keras functional Model.
    """
    inputs = layers.Input(shape=(800, 1), name='silences_input')
    x = layers.BatchNormalization()(inputs)

    # Conv stack: (filters, kernel size) per stage.
    for filters, kernel in ((16, 3), (32, 3), (64, 5), (128, 5)):
        x = layers.Conv1D(filters, kernel_size=kernel, strides=1,
                          activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPool1D()(x)

    x = layers.Flatten()(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.2)(x)

    # Default output is the dropout features (mirrors original flow when
    # `task` is unrecognized).
    outputs = x
    if task == 'classification':
        outputs = layers.Dense(2, activation='softmax')(x)
    elif task == 'regression':
        if uncertainty:
            # NOTE(review): the ReLU(max_value=30) here clips BOTH the loc
            # and the pre-softplus scale logit — preserved as-is, but
            # confirm this is intentional (it bounds scale away from 0).
            head = layers.Dense(2)(x)
            head = layers.ReLU(max_value=30)(head)
            outputs = tfp.layers.DistributionLambda(
                lambda t: tfd.Normal(
                    loc=t[..., :1],
                    scale=tf.math.softplus(t[..., 1:]) + 1e-6))(head)
        else:
            # Regularized point estimate clipped to [0, 30].
            head = layers.Dense(
                1,
                kernel_regularizer=tf.keras.regularizers.l2(0.01),
                activity_regularizer=tf.keras.regularizers.l1(0.01))(x)
            outputs = layers.ReLU(max_value=30)(head)

    return Model(inputs, outputs)