-
Notifications
You must be signed in to change notification settings - Fork 4
/
Vgg10.py
101 lines (96 loc) · 4.7 KB
/
Vgg10.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
#! /usr/bin/python3
import tensorflow as tf
import quantization as q
def get_initializer():
    """Weight initializer for the dense layers: He-style variance scaling
    (scale=1.0, fan-in mode), suitable for SELU activations."""
    init = tf.variance_scaling_initializer(scale=1.0, mode='fan_in')
    return init
def get_conv_layer_full_prec( x, training, no_filt = 64 ):
    """Full-precision conv block: conv1d(k=3) -> max-pool(2,2) -> batch-norm -> ReLU.

    Args:
        x: input tensor, shape (batch, length, channels).
        training: bool tensor/flag forwarded to batch normalization.
        no_filt: number of output filters for the convolution.

    Returns:
        Activated feature map with the temporal dimension halved by pooling.
    """
    out = tf.layers.conv1d( x, no_filt, 3, padding = "SAME", use_bias = False )
    out = tf.layers.max_pooling1d( out, 2, 2 )
    out = tf.layers.batch_normalization( out, training = training )
    out = tf.nn.relu( out )
    return out
def get_conv_layer( x, training, no_filt = 128, nu = None, act_prec = True ):
    """Quantized conv block: ternarized conv1d(k=3) -> max-pool(2,2) -> batch-norm
    -> shaped (quantized) ReLU or plain ReLU.

    Args:
        x: input tensor, shape (batch, length, channels).
        training: bool tensor/flag forwarded to batch normalization.
        no_filt: number of output filters.
        nu: ternarization parameter for q.trinarize; if None, falls back to the
            full-precision block and act_prec is ignored.
        act_prec: activation precision passed to q.shaped_relu; if None, a
            plain ReLU is used instead.

    Returns:
        Activated feature map with the temporal dimension halved by pooling.
    """
    # Guard clause: no quantization parameter means a full-precision layer.
    if nu is None:
        return get_conv_layer_full_prec( x, training, no_filt )
    kernel_shape = [ 3, x.get_shape()[-1], no_filt ]
    weights = tf.get_variable( "conv_filter", kernel_shape )
    tf.summary.histogram( "conv_filter_fp", weights )
    # Ternarize the kernel before it is applied.
    weights = q.trinarize( weights, nu = nu )
    out = tf.nn.conv1d( x, weights, 1, padding = "SAME" )
    out = tf.layers.max_pooling1d( out, 2, 2 )
    out = tf.layers.batch_normalization( out, training = training )
    tf.summary.histogram( "conv_dist", out )
    tf.summary.histogram( "conv_filter_tri", weights )
    if act_prec is not None:
        out = q.shaped_relu( out, act_prec )
    else:
        out = tf.nn.relu( out )
    return out
def get_net( x, training = False, use_SELU = False, act_prec = None, nu = None, no_filt = 64, remove_mean = True ):
    """Build the 10-layer VGG-style network: 7 conv blocks, 2 dense blocks,
    and a final linear classifier.

    Args:
        x: input tensor, shape (batch, length, channels).
        training: bool tensor/flag controlling batch-norm / dropout behavior
            (used as a predicate in tf.where for the SELU dropout path).
        use_SELU: if True, the two hidden dense layers use SELU + alpha-dropout
            instead of ternarized weights + batch-norm + (shaped) ReLU.
        act_prec: per-layer activation precisions (9 entries) or None.
        nu: per-layer ternarization parameters (9 entries) or None.
        no_filt: either an int (conv width; dense widths default to 128,128,24)
            or a 10-element list of widths for all layers.
        remove_mean: if True, subtract the per-example temporal mean from x.

    Returns:
        Logits tensor from the final dense layer (no bias, no activation).
    """
    if remove_mean:
        # Per-example mean over the temporal axis, broadcast back via tile.
        mu, _ = tf.nn.moments(x, axes=[1])
        mu = tf.expand_dims( mu, 1 )
        mu = tf.tile( mu, [ 1, x.get_shape()[1], 1 ] )
        x = ( x - mu )
    if nu is None:
        nu = [None]*9
    if act_prec is None:
        act_prec = [None]*9
    if type(no_filt) == int:
        no_filt = [no_filt]*7 + [128,128,24]
    else:
        assert len(no_filt) == 10, "Incorrect length of no_filt for custom"
    # Seven conv blocks under scopes "lyr1" .. "lyr7".
    net = x
    for i in range(7):
        with tf.variable_scope("lyr" + str(i + 1)):
            net = get_conv_layer( net, training, no_filt = no_filt[i], nu = nu[i], act_prec = act_prec[i] )
    net = tf.layers.flatten( net )
    if use_SELU:
        # Full-precision dense path: SELU activations with alpha-dropout
        # applied only when `training` is true (selected via tf.where).
        w1 = tf.get_variable( "dense_8", [ net.get_shape()[-1], no_filt[7] ], initializer = get_initializer() )
        w2 = tf.get_variable( "dense_9", [ no_filt[7], no_filt[8] ], initializer = get_initializer() )
        with tf.variable_scope("dense_1"):
            net = tf.matmul( net, w1 )
            net = tf.nn.selu( net )
            dropped = tf.contrib.nn.alpha_dropout( net, 0.95 )
            net = tf.where( training, dropped, net )
        with tf.variable_scope("dense_2"):
            net = tf.matmul( net, w2 )
            net = tf.nn.selu( net )
            dropped = tf.contrib.nn.alpha_dropout( net, 0.95 )
            net = tf.where( training, dropped, net )
    else:
        # Quantized dense path: ternarized weights, batch-norm, shaped ReLU.
        w1 = tf.get_variable( "dense_8", [ net.get_shape()[-1], no_filt[7] ] )
        w2 = tf.get_variable( "dense_9", [ no_filt[7], no_filt[8] ] )
        tf.summary.histogram( "dense_1_fp", w1 )
        tf.summary.histogram( "dense_2_fp", w2 )
        w1 = q.trinarize( w1, nu = nu[7] )
        w2 = q.trinarize( w2, nu = nu[8] )
        tf.summary.histogram( "dense_1_tri", w1 )
        tf.summary.histogram( "dense_2_tri", w2 )
        with tf.variable_scope("dense_1"):
            net = tf.matmul( net, w1 )
            net = tf.layers.batch_normalization( net, training = training )
            if act_prec[7] is not None:
                net = q.shaped_relu( net, act_prec[7] )
            else:
                net = tf.nn.relu( net )
        with tf.variable_scope("dense_2"):
            net = tf.matmul( net, w2 )
            net = tf.layers.batch_normalization( net, training = training )
            if act_prec[8] is not None:
                net = q.shaped_relu( net, act_prec[8] )
            else:
                net = tf.nn.relu( net )
    with tf.variable_scope("dense_3"):
        pred = tf.layers.dense( net, no_filt[9], use_bias = False )
    return pred