import eel
import argparse
import cv2 as cv
import numpy as np
from yolo_utils import infer_image
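# NOTE (assumption): yolo_utils.infer_image is not shown in this file; from the
# calls below it is assumed to have roughly the signature
#   infer_image(net, layer_names, height, width, frame, colors, labels,
#               show_time, confidence, threshold,
#               boxes=None, confidences=None, classids=None, idxs=None, infer=True)
# and to return (frame, boxes, confidences, classids, idxs).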
eel.init('website')
model_path = './yolov3-coco/'
weights_path = './yolov3-coco/yolov3.weights'
config_path = './yolov3-coco/yolov3.cfg'
vid_output_path = './output.mp4'
labels_path = './yolov3-coco/coco-labels'
confidence = 0.5
threshold = 0.3
show_Time = False
@eel.expose
def update_params(weightsPath, outputPath, confidenceValue, threshold_Value, showTime):
    # Called from the front end to override the default paths and thresholds.
    global weights_path, vid_output_path, confidence, threshold, show_Time
    weights_path, vid_output_path = weightsPath, outputPath
    confidence, threshold, show_Time = confidenceValue, threshold_Value, showTime
    print("Params updated")
    print("Output path: " + vid_output_path)
@eel.expose
def test(videoPath):
    # Load the class labels and assign a unique colour to each one
    labels = open(labels_path).read().strip().split('\n')
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')

    # Load the weights and configuration to form the pre-trained YOLOv3 model
    net = cv.dnn.readNetFromDarknet(config_path, weights_path)

    # Get the names of the output layers of the model
    layer_names = net.getLayerNames()
    layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    height, width = None, None
    vid = cv.VideoCapture(videoPath)
    if not vid.isOpened():
        raise Exception("Video cannot be loaded!\nPlease check the path provided!")
    writer = None

    while True:
        grabbed, frame = vid.read()
        # Stop once the complete video has been read
        if not grabbed:
            break
        if width is None or height is None:
            height, width = frame.shape[:2]

        frame, _, _, _, _ = infer_image(net, layer_names, height, width, frame, colors, labels,
                                        show_Time, float(confidence), float(threshold))

        if writer is None:
            # Initialize the video writer on the first processed frame
            fourcc = cv.VideoWriter_fourcc(*"MJPG")
            writer = cv.VideoWriter(vid_output_path, fourcc, 30,
                                    (frame.shape[1], frame.shape[0]), True)
        writer.write(frame)

        cv.imshow('Video', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    if writer is not None:
        writer.release()
    vid.release()
    cv.destroyAllWindows()
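# Usage sketch (assumption): the front end calls this through Eel, e.g.
#   eel.test('/path/to/video.mp4')   // from JavaScript in ./website
# and the annotated video is written to vid_output_path.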
@eel.expose
def webCam():
    print("Starting Inference on WebCam")
    # FLAGS, unparsed = pass_args()

    # Load the class labels and assign a unique colour to each one
    labels = open(labels_path).read().strip().split('\n')
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')

    # Load the weights and configuration to form the pre-trained YOLOv3 model
    net = cv.dnn.readNetFromDarknet(config_path, weights_path)

    # Get the names of the output layers of the model
    layer_names = net.getLayerNames()
    layer_names = [layer_names[i[0] - 1]
                   for i in net.getUnconnectedOutLayers()]

    # Infer in real time on the webcam: run a full detection on roughly every
    # sixth frame and redraw the previous boxes in between (infer=False).
    count = 0
    vid = cv.VideoCapture(0)
    while True:
        _, frame = vid.read()
        height, width = frame.shape[:2]

        if count == 0:
            frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, height, width,
                                                                    frame, colors, labels, show_Time,
                                                                    confidence, threshold)
            count += 1
        else:
            frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, height, width,
                                                                    frame, colors, labels, show_Time,
                                                                    confidence, threshold, boxes,
                                                                    confidences, classids, idxs,
                                                                    infer=False)
            count = (count + 1) % 6

        cv.imshow('webcam', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    vid.release()
    cv.destroyAllWindows()
def pass_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model-path', type=str, default='./yolov3-coco/',
                        help='The directory where the model weights and configuration'
                             ' files are.')
    parser.add_argument('-w', '--weights', type=str, default='./yolov3-coco/yolov3.weights',
                        help='Path to the file which contains the weights for YOLOv3.')
    parser.add_argument('-cfg', '--config', type=str, default='./yolov3-coco/yolov3.cfg',
                        help='Path to the configuration file for the YOLOv3 model.')
    parser.add_argument('-i', '--image-path', type=str,
                        help='The path to the image file.')
    parser.add_argument('-v', '--video-path', type=str,
                        help='The path to the video file.')
    parser.add_argument('-vo', '--video-output-path', type=str, default='./output.avi',
                        help='The path of the output video file.')
    parser.add_argument('-l', '--labels', type=str, default='./yolov3-coco/coco-labels',
                        help='Path to the file containing the labels, one per line.')
    parser.add_argument('-c', '--confidence', type=float, default=0.5,
                        help='The model will reject detections with a probability '
                             'lower than the confidence value. Default: 0.5.')
    parser.add_argument('-th', '--threshold', type=float, default=0.3,
                        help='The threshold to use when applying Non-Max Suppression.')
    parser.add_argument('--download-model', type=bool, default=False,
                        help='Set to True if the model weights and configuration '
                             'are not present on your local machine.')
    parser.add_argument('-t', '--show-time', type=bool, default=False,
                        help='Show the time taken to infer each image.')
    return parser.parse_known_args()
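# Sketch (assumption): pass_args() is not called anywhere at the moment (only the
# commented-out call in webCam()); if wired in, its result could override the
# module-level defaults, e.g.:
#   FLAGS, unparsed = pass_args()
#   weights_path, config_path, labels_path = FLAGS.weights, FLAGS.config, FLAGS.labels
#   confidence, threshold, show_Time = FLAGS.confidence, FLAGS.threshold, FLAGS.show_time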
options = {
    'mode': 'chrome',
    'port': 1235
}

eel.start('index.html', port=7478)
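# Launch note (assumption): running `python main.py` serves the ./website folder
# (see eel.init above) and opens index.html in the browser on port 7478. The
# `options` dict above is defined but never passed to eel.start, so its
# 'mode'/'port' values have no effect as written.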