from tensorflow.keras import backend as K
from tensorflow.keras.utils import get_source_inputs
from tensorflow.keras.layers import (
Activation,
AveragePooling3D,
BatchNormalization,
Conv3D,
Conv3DTranspose,
Dense,
Dropout,
GlobalAveragePooling3D,
GlobalMaxPooling3D,
Input,
MaxPooling3D,
Reshape,
UpSampling3D,
concatenate,
)
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
'''
Model is taken from: https://github.com/GalDude33/DenseNetFCN-3D/blob/master/DenseNet3D.py
DenseNet and DenseNet-FCN models for Keras.
DenseNet is a network architecture where each layer is directly connected
to every other layer in a feed-forward fashion (within each dense block).
For each layer, the feature maps of all preceding layers are treated as
separate inputs whereas its own feature maps are passed on as inputs to
all subsequent layers. This connectivity pattern yields state-of-the-art
accuracies on CIFAR10/100 (with or without data augmentation) and SVHN.
On the large-scale ILSVRC 2012 (ImageNet) dataset, DenseNet achieves
accuracy similar to ResNet, but with less than half the parameters and
roughly half the FLOPs.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic
Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
'''
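
# A quick worked example of how dense connectivity grows the channel count
# (numbers follow this file's defaults, not anything prescribed upstream):
# starting from nb_filter = 2 * growth_rate = 24 with growth_rate = 12, a
# dense block of 12 layers concatenates 12 new feature maps per layer, so
# it exits with 24 + 12 * 12 = 168 channels before a transition block
# compresses it by the (1 - reduction) factor.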
def DenseNet3D(input_shape=None,
depth=40,
nb_dense_block=3,
growth_rate=12,
nb_filter=-1,
nb_layers_per_block=-1,
bottleneck=False,
reduction=0.0,
dropout_rate=0.0,
weight_decay=1e-4,
subsample_initial_block=False,
include_top=True,
input_tensor=None,
pooling=None,
classes=10,
activation='softmax',
transition_pooling='avg'):
    '''Instantiate the DenseNet architecture.
    The dimension ordering convention used by the model is the one
    specified in your Keras config file.
# Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 224, 3)` with `channels_last` dim ordering
            or `(3, 224, 224, 224)` with `channels_first` dim ordering).
            It should have exactly 3 input channels, and each spatial
            dimension should be no smaller than 8.
            E.g. `(224, 224, 224, 3)` would be one valid value.
        depth: number of layers in the DenseNet
        nb_dense_block: number of dense blocks to add to the end
        growth_rate: number of filters added by each layer within a dense block
        nb_filter: initial number of filters. -1 indicates the initial
            number of filters will default to 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
            Can be -1, a positive integer, or a list.
            If -1, calculates nb_layers_per_block from the network depth.
            If a positive integer, a fixed number of layers per dense block.
            If a list, the values are used as provided; the list length
            must equal nb_dense_block
bottleneck: flag to add bottleneck blocks in between dense blocks
reduction: reduction factor of transition blocks.
Note : reduction value is inverted to compute compression.
dropout_rate: dropout rate
weight_decay: weight decay rate
        subsample_initial_block: changes the model stem to suit different
            datasets. Should be set to True for ImageNet, and False for
            CIFAR datasets. When set to True, the initial convolution is
            strided and a MaxPooling3D is added before the first dense block.
include_top: whether to include the fully-connected
layer at the top of the network.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True.
activation: Type of activation at the top layer. Can be one of
'softmax' or 'sigmoid'. Note that if sigmoid is used,
classes must be 1.
transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
None for no pooling during scale transition blocks. Please note that this
default differs from the DenseNetFCN paper in accordance with the DenseNet
paper.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid input shape.
'''
if activation not in ['softmax', 'sigmoid']:
raise ValueError('activation must be one of "softmax" or "sigmoid"')
if activation == 'sigmoid' and classes != 1:
raise ValueError('sigmoid activation can only be used when classes = 1')
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
growth_rate, nb_filter, nb_layers_per_block, bottleneck,
reduction, dropout_rate, weight_decay,
subsample_initial_block, pooling, activation,
transition_pooling)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='densenet')
return model
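
# Example usage (a minimal sketch; the input shape and hyper-parameters are
# illustrative, not values prescribed by this repository):
#
#     model = DenseNet3D(input_shape=(32, 32, 32, 3), depth=40,
#                        nb_dense_block=3, growth_rate=12)
#     model.summary()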
'''
For this work we used 3 blocks of DenseNet121 with 6, 12, and 24 layers
per block, respectively.
'''
def DenseNet3DImageNet121(input_shape=None,
bottleneck=True,
reduction=0.5,
dropout_rate=0.0,
weight_decay=1e-4,
include_top=True,
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
return DenseNet3D(input_shape, depth=121, nb_dense_block=3, growth_rate=32,
nb_filter=64, nb_layers_per_block=[6,12,24],
bottleneck=bottleneck, reduction=reduction,
dropout_rate=dropout_rate, weight_decay=weight_decay,
subsample_initial_block=True, include_top=include_top,
input_tensor=input_tensor,
pooling=pooling, classes=classes, activation=activation)
def name_or_none(prefix, name):
return prefix + name if (prefix is not None and name is not None) else None
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None,
weight_decay=1e-4, block_prefix=None):
with K.name_scope('ConvBlock'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name=name_or_none(block_prefix, '_bn'))(ip)
x = Activation('relu')(x)
if bottleneck:
inter_channel = nb_filter * 4
x = Conv3D(inter_channel, (1, 1, 1), kernel_initializer='he_normal',
padding='same', use_bias=False,
kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_bottleneck_Conv3D'))(x)
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name=name_or_none(block_prefix, '_bottleneck_bn'))(x)
x = Activation('relu')(x)
x = Conv3D(nb_filter, (3, 3, 3), kernel_initializer='he_normal', padding='same',
use_bias=False, name=name_or_none(block_prefix, '_Conv3D'))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False,
dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True,
return_concat_list=False, block_prefix=None):
with K.name_scope('DenseBlock'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x_list = [x]
for i in range(nb_layers):
cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay,
block_prefix=name_or_none(block_prefix, '_%i' % i))
x_list.append(cb)
x = concatenate([x, cb], axis=concat_axis)
if grow_nb_filters:
nb_filter += growth_rate
if return_concat_list:
return x, nb_filter, x_list
else:
return x, nb_filter
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4,
block_prefix=None, transition_pooling='max'):
with K.name_scope('Transition'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name=name_or_none(block_prefix, '_bn'))(ip)
x = Activation('relu')(x)
x = Conv3D(int(nb_filter * compression), (1, 1, 1), kernel_initializer='he_normal',
padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_Conv3D'))(x)
if transition_pooling == 'avg':
x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)
elif transition_pooling == 'max':
x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2))(x)
return x
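
# Transition arithmetic (illustrative, continuing the worked example above):
# with reduction=0.5 the caller passes compression=0.5, so a block entering
# with 168 channels leaves the 1x1x1 convolution with int(168 * 0.5) = 84
# channels, while the 2x2x2 pooling halves each spatial dimension.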
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4,
block_prefix=None):
with K.name_scope('TransitionUp'):
if type == 'upsampling':
x = UpSampling3D(name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            x = Conv3D(nb_filters, (3, 3, 3), activation='relu', padding='same',
                       kernel_regularizer=l2(weight_decay), use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D'))(ip)
            # NOTE: SubPixelUpscaling is neither defined nor imported in this
            # module (it comes from keras-contrib in the original source), so
            # selecting type='subpixel' raises a NameError as the file stands.
            x = SubPixelUpscaling(scale_factor=2,
                                  name=name_or_none(block_prefix, '_subpixel'))(x)
x = Conv3D(nb_filters, (3, 3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), use_bias=False,
kernel_initializer='he_normal',
name=name_or_none(block_prefix, '_Conv3D'))(x)
else:
x = Conv3DTranspose(nb_filters, (3, 3, 3), activation='relu', padding='same',
strides=(2, 2, 2), kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_Conv3DT'))(ip)
return x
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3,
growth_rate=12, nb_filter=-1, nb_layers_per_block=-1,
bottleneck=False, reduction=0.0, dropout_rate=None,
weight_decay=1e-4, subsample_initial_block=False, pooling=None,
activation='softmax', transition_pooling='avg'):
with K.name_scope('DenseNet'):
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
if reduction != 0.0:
if not (reduction <= 1.0 and reduction > 0.0):
raise ValueError('`reduction` value must lie between 0.0 and 1.0')
# layers in each dense block
if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
nb_layers = list(nb_layers_per_block) # Convert tuple to list
if len(nb_layers) != nb_dense_block:
                raise ValueError('If `nb_layers_per_block` is a list, its '
                                 'length must equal `nb_dense_block`.')
final_nb_layer = nb_layers[-1]
nb_layers = nb_layers[:-1]
else:
if nb_layers_per_block == -1:
assert (depth - 4) % 3 == 0, ('Depth must be 3 N + 4 '
'if nb_layers_per_block == -1')
count = int((depth - 4) / 3)
if bottleneck:
count = count // 2
nb_layers = [count for _ in range(nb_dense_block)]
final_nb_layer = count
else:
final_nb_layer = nb_layers_per_block
nb_layers = [nb_layers_per_block] * nb_dense_block
        # compute initial nb_filter if -1, else accept the user's initial nb_filter
if nb_filter <= 0:
nb_filter = 2 * growth_rate
# compute compression factor
compression = 1.0 - reduction
# Initial convolution
if subsample_initial_block:
initial_kernel = (7, 7, 7)
initial_strides = (2, 2, 2)
else:
initial_kernel = (3, 3, 3)
initial_strides = (1, 1, 1)
x = Conv3D(nb_filter, initial_kernel, kernel_initializer='he_normal',
padding='same', name='initial_Conv3D', strides=initial_strides,
use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
if subsample_initial_block:
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
name='initial_bn')(x)
x = Activation('relu')(x)
x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
growth_rate, bottleneck=bottleneck,
dropout_rate=dropout_rate,
weight_decay=weight_decay,
block_prefix='dense_%i' % block_idx)
# add transition_block
x = __transition_block(x, nb_filter, compression=compression,
weight_decay=weight_decay,
block_prefix='tr_%i' % block_idx,
transition_pooling=transition_pooling)
nb_filter = int(nb_filter * compression)
# The last dense_block does not have a transition_block
x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate,
bottleneck=bottleneck, dropout_rate=dropout_rate,
weight_decay=weight_decay,
block_prefix='dense_%i' % (nb_dense_block - 1))
x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name='final_bn')(x)
x = Activation('relu')(x)
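        # NOTE: the original DenseNet classification head (a single Dense layer
        # with `nb_classes` units and the requested `activation`) has been
        # replaced here by a fixed MLP regression head ending in one tanh unit,
        # so the `include_top`, `pooling`, `nb_classes` and `activation`
        # arguments are effectively ignored below.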
x = GlobalAveragePooling3D()(x)
x = Dense(units=512, activation='relu')(x)
x = Dropout(0.4)(x)
x = Dense(units=512, activation='relu')(x)
x = Dropout(0.4)(x)
x = Dense(units=256, activation='relu')(x)
x = Dropout(0.4)(x)
x = Dense(units=128, activation='relu')(x)
x = Dropout(0.4)(x)
x = Dense(1, activation="tanh")(x)
return x
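

if __name__ == '__main__':
    # Smoke test (a minimal sketch): build the 3-block DenseNet121 variant on
    # a small single-channel volume. The (64, 64, 64, 1) shape is illustrative;
    # any volume large enough to survive the strided stem and the two
    # transition poolings will do. Note the fixed regression head above: the
    # model ends in a single tanh unit regardless of `classes`/`activation`.
    model = DenseNet3DImageNet121(input_shape=(64, 64, 64, 1))
    model.summary()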