-
Notifications
You must be signed in to change notification settings - Fork 27
/
phoframeTest.py
160 lines (124 loc) · 5.24 KB
/
phoframeTest.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
import tensorflow as tf
import numpy as np
from scipy import misc
import random
import math
import os
# Phoneme-per-frame labels, one entry per line; list index corresponds to frame number.
# Use a context manager so the file handle is closed (the original leaked it).
with open("/media/rob/Ma Book1/CS 230/videoToVoice/3/phoframes.txt", "r") as phoframeFile:
    phoframes = phoframeFile.read().split("\n")

FOLDER_SAVE_NAME = "phoframe41"
# exist_ok=True makes these idempotent, replacing the separate os.path.exists checks.
os.makedirs(FOLDER_SAVE_NAME, exist_ok=True)
os.makedirs(FOLDER_SAVE_NAME + "/samples", exist_ok=True)
os.makedirs(FOLDER_SAVE_NAME + "/models", exist_ok=True)
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape.

    Initialized from a truncated normal distribution with stddev 0.1.
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, initialized to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def getRandomFrame():
    """Pick a random frame index in [14, 120000) whose 29-frame neighborhood is usable.

    Re-draws until isFValid accepts the frame, excluding portions of the
    video with no visible mouth.
    """
    # random.randrange already returns an int; the original int(math.floor(...))
    # wrapping was redundant.
    f = random.randrange(14, 120000)
    while not isFValid(f):
        f = random.randrange(14, 120000)
    return f
def isFValid(f):
    """Return True if a mouth image exists for every frame in [f-14, f+14].

    Frame files are named '3/mouthImages/frameNNNN.jpg' with the index
    zero-padded to at least 4 digits.
    """
    for nearF in range(f - 14, f + 15):
        # str.zfill replaces the original manual zero-padding loop.
        path = '3/mouthImages/frame' + str(nearF).zfill(4) + '.jpg'
        if not os.path.exists(path):
            return False
    return True
def getInVidsAtFrame(f):
    """Load the 29 mouth images centered on frame f into one (1, H, W, 87) array.

    Each image contributes 3 channels (RGB) of the depth axis. Images larger
    than (INVID_HEIGHT, INVID_WIDTH) are center-cropped; smaller ones are
    center-padded with the zero background. Pixel values are scaled to [0, 1].
    """
    arr = np.zeros([1, INVID_HEIGHT, INVID_WIDTH, INVID_DEPTH])
    for imageIndex in range(29):
        strIndex = str(f - 14 + imageIndex).zfill(4)
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; migrate
        # to imageio.imread when dependencies can change.
        newImage = misc.imread('3/mouthImages/frame' + strIndex + '.jpg')
        # Center-crop any dimension larger than the target size.
        if newImage.shape[0] > INVID_HEIGHT:
            extraMargin = (newImage.shape[0] - INVID_HEIGHT) // 2
            newImage = newImage[extraMargin:extraMargin + INVID_HEIGHT, :, :]
        if newImage.shape[1] > INVID_WIDTH:
            extraMargin = (newImage.shape[1] - INVID_WIDTH) // 2
            newImage = newImage[:, extraMargin:extraMargin + INVID_WIDTH, :]
        # Center-pad smaller images by writing into the middle of the zero array.
        h = newImage.shape[0]
        w = newImage.shape[1]
        yStart = (INVID_HEIGHT - h) // 2
        xStart = (INVID_WIDTH - w) // 2
        arr[:, yStart:yStart + h, xStart:xStart + w, imageIndex * 3:(imageIndex + 1) * 3] = newImage
    # arr is already an ndarray; the original np.asarray(...) wrapper was a no-op.
    return arr / 255.0
def getLabelsAtFrame(f):
    """Return the phoneme class id recorded for frame f (from the phoframes list)."""
    label = phoframes[f]
    return int(label)
# --- Hyperparameters ---
INVID_WIDTH = 256   # mouth width
INVID_HEIGHT = 256  # mouth height
INVID_DEPTH = 87    # 29 images of R, G, B
PHONEME_CATEGORIES = 41
learning_rate = 0.0002

# --- Inputs ---
invids_ = tf.placeholder(tf.float32, (None, INVID_HEIGHT, INVID_WIDTH, INVID_DEPTH), name='invids')
labels_ = tf.placeholder(tf.int32, (None), name='labels')

### Encode the invids: conv/pool pyramid shrinking 256x256 down to 4x4
conv1 = tf.layers.conv2d(inputs=invids_, filters=40, kernel_size=(5,5), strides=(2,2), padding='same', activation=tf.nn.relu)
# Now 128x128x40
maxpool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=(2,2), padding='same')
# Now 64x64x40
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=70, kernel_size=(5,5), padding='same', activation=tf.nn.relu)
# Now 64x64x70
maxpool2 = tf.layers.max_pooling2d(conv2, pool_size=2, strides=(2,2), padding='same')
# Now 32x32x70
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=100, kernel_size=(5,5), padding='same', activation=tf.nn.relu)
# Now 32x32x100
maxpool3 = tf.layers.max_pooling2d(conv3, pool_size=2, strides=(2,2), padding='same')
# Now 16x16x100
conv4 = tf.layers.conv2d(inputs=maxpool3, filters=130, kernel_size=(5,5), padding='same', activation=tf.nn.relu)
# Now 16x16x130
maxpool4 = tf.layers.max_pooling2d(conv4, pool_size=4, strides=(4,4), padding='same')
# Now 4x4x130 (flatten to 2080)
maxpool4_flat = tf.reshape(maxpool4, [-1, 4*4*130])
# Now 2080

# --- Fully connected classifier head ---
W_fc1 = weight_variable([2080, 1000])
b_fc1 = bias_variable([1000])
fc1 = tf.nn.relu(tf.matmul(maxpool4_flat, W_fc1) + b_fc1)
W_fc2 = weight_variable([1000, 300])
b_fc2 = bias_variable([300])
fc2 = tf.nn.relu(tf.matmul(fc1, W_fc2) + b_fc2)
W_fc3 = weight_variable([300, PHONEME_CATEGORIES])
b_fc3 = bias_variable([PHONEME_CATEGORIES])
logits = tf.matmul(fc2, W_fc3) + b_fc3
# Now PHONEME_CATEGORIES (41) logits per example.
# (The original also built an unused tf.one_hot tensor; sparse_softmax_cross_entropy
# consumes the integer labels directly, so it has been removed.)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels_, logits=logits)
output = tf.nn.softmax(logits)

# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print("made it here! :D")
sess = tf.Session()

# Frame range to run inference over.
RANGE_START = 120030
RANGE_END = 131030
# NOTE(review): epochs, batch_size, and MODEL_SAVE_EVERY are unused in this
# evaluation script — presumably leftovers from the training script; kept for parity.
epochs = 2000000
batch_size = 50
MODEL_SAVE_EVERY = 50
SAVE_FILE_START_POINT = 5750

saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
if SAVE_FILE_START_POINT >= 1:
    # Restore the checkpoint written at this step by the training run.
    saver.restore(sess, FOLDER_SAVE_NAME + "/models/model" + str(SAVE_FILE_START_POINT) + ".ckpt")
print("about to start...")

# Write one tab-separated line of 41 softmax probabilities per frame.
# 'with' guarantees the file is closed even if a frame fails to load.
with open(FOLDER_SAVE_NAME + '/outputted.txt', 'w') as outFile:
    for frame in range(RANGE_START, RANGE_END):
        # One-frame batch: getInVidsAtFrame already returns shape (1, H, W, D),
        # so the original empty-array + vstack dance is unnecessary.
        invids = getInVidsAtFrame(frame)
        labels = np.array([getLabelsAtFrame(frame)])
        _output, batch_cost, _logits = sess.run([output, cost, logits],
                                                feed_dict={invids_: invids, labels_: labels})
        # Matches the original format: every probability followed by a tab, then newline.
        outFile.write("".join(str(p) + "\t" for p in _output[0]) + "\n")
        print("Done with " + str(frame - RANGE_START) + " / " + str(RANGE_END - RANGE_START))