main.py
#!/usr/bin/env python2
#
import time
start = time.time()
import argparse
import cv2
import itertools
import os
from openface.data import iterImgs
import numpy as np
np.set_printoptions(precision=2)
import openface
import pickle
import sys
import pandas as pd
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, 'models')
# modelDir = os.path.join('/root/openface/models') # docker image bamos/openface
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
parser = argparse.ArgumentParser()
parser.add_argument('--targetDir', type=str,
                    help="Path to raw images with faces of the target person.",
                    default="./rawimg/")
parser.add_argument('imgTest', type=str, nargs='+', help="Input images.")
parser.add_argument('--dlibFacePredictor', type=str,
                    help="Path to dlib's face predictor.",
                    default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str,
                    help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
                    help="Default image dimension.", default=96)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()

if args.verbose:
    print("Argument parsing and loading libraries took {} seconds.".format(
        time.time() - start))

start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, args.imgDim)
if args.verbose:
    print("Loading the dlib and OpenFace models took {} seconds.".format(
        time.time() - start))
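
# getRep: detect the largest face in an image, align it on the outer eyes and
# nose, and run the OpenFace network to obtain a face embedding
# (128-dimensional for the default nn4.small2.v1 model).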
def getRep(imgPath):
    if args.verbose:
        print("Processing {}.".format(imgPath))
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    if args.verbose:
        print(" + Original size: {}".format(rgbImg.shape))

    start = time.time()
    bb = align.getLargestFaceBoundingBox(rgbImg)
    if bb is None:
        raise Exception("Unable to find a face: {}".format(imgPath))
    if args.verbose:
        print(" + Face detection took {} seconds.".format(time.time() - start))

    start = time.time()
    alignedFace = align.align(args.imgDim, rgbImg, bb,
                              landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if alignedFace is None:
        raise Exception("Unable to align image: {}".format(imgPath))
    if args.verbose:
        print(" + Face alignment took {} seconds.".format(time.time() - start))

    start = time.time()
    rep = net.forward(alignedFace)
    if args.verbose:
        print(" + OpenFace forward pass took {} seconds.".format(time.time() - start))
    # print("Representation:")
    # print(type(rep))
    # print(rep)
    print("-----\n")
    return rep
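
# Cache the target person's representations: reuse rep.pkl (mean and median
# embedding) and rep.csv (one row per training image) if they already exist,
# otherwise build them from the images in --targetDir.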
fName = "./rep.pkl"
fName2 = "./rep.csv"
if os.path.isfile(fName):
    print("Loading mean representation from '{}'".format(fName))
    with open(fName, 'rb') as f:
        (repMean, repMedian) = pickle.load(f)
    repAll = pd.read_csv(fName2, header=None).values
    print(repAll.shape)
else:
    # iterate through the images in the target directory, get their
    # representations, and calculate the mean/median
    repList = list()
    imgs = list(iterImgs(args.targetDir))[0:10]
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        try:
            imgRep = getRep(imgObject.path)
            repList.append(imgRep)
        except Exception as exc:
            print(exc)
    repAll = np.vstack(repList)
    print("=== representations:")
    print(repAll.shape)
    repMean = np.mean(repAll, axis=0)
    repMedian = np.median(repAll, axis=0)
    # store the embeddings in a CSV file
    with open(fName2, 'w') as f:
        pd.DataFrame(repAll).to_csv(f, header=False, index=False)
    # store the mean/median in a pickle file
    print("Saving mean representation to '{}'".format(fName))
    with open(fName, 'wb') as f:
        pickle.dump((repMean, repMedian), f)
# calculate distance between mean representation and test image
for imgtest in args.imgTest:
    try:
        imgRep = getRep(imgtest)
    except Exception as exc:
        print("Something unexpected happened:")
        print(exc)
        continue
    else:
        dmean = repMean - imgRep
        dmedian = repMedian - imgRep
        print("Squared l2 distance between mean representation and test image ({}): {:0.3f}".format(imgtest, np.dot(dmean, dmean)))
        print("Squared l2 distance between median representation and test image ({}): {:0.3f}".format(imgtest, np.dot(dmedian, dmedian)))
        # distance to each training image
        distAll = repAll - imgRep
        distAll = np.sum(distAll * distAll, axis=1)
        print("distance to each training image: {}".format(distAll))