forked from btothey99/DibidibidipLearning
-
Notifications
You must be signed in to change notification settings - Fork 0
/
facedetect
154 lines (121 loc) · 4.06 KB
/
facedetect
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
# --- Colab setup cell ---
# Clone OpenCV to obtain its bundled Haar cascade XML files (used below),
# create the output directory for recordings, and install ffmpeg bindings.
# Lines starting with '!' are IPython shell magics (Colab), not plain Python.
!git clone https://github.com/opencv/opencv.git
!mkdir Video
!pip install ffmpeg-python
from IPython.display import HTML, Javascript, display
from google.colab.output import eval_js
from base64 import b64decode
import numpy as np
import io
import ffmpeg

# Destination path for the webcam recording captured below.
# NOTE(review): the recorder produces webm (see VIDEO_HTML mimeType) even
# though the extension is .mp4 — confirm downstream consumers don't care.
video_file_test = '/content/Video/osy_test.mp4'
# HTML/JS recorder widget injected into the notebook output. It renders a
# button that starts recording from the webcam via MediaRecorder
# ('video/webm;codecs=vp9'); a second press stops recording, previews the
# clip in a <video> element, and publishes the result as a base64 data-URL
# through the JS promise named `data` (read back in Python via eval_js).
# The 2000 ms sleep before resolving gives the FileReader time to finish.
VIDEO_HTML = """
<script>
var my_div = document.createElement("DIV");
var my_p = document.createElement("P");
var my_btn = document.createElement("BUTTON");
var my_btn_txt = document.createTextNode("Press to start recording");
my_btn.appendChild(my_btn_txt);
my_div.appendChild(my_btn);
document.body.appendChild(my_div);
var base64data = 0;
var reader;
var recorder, videoStream;
var recordButton = my_btn;
var handleSuccess = function(stream) {
videoStream = stream;
var options = {
mimeType : 'video/webm;codecs=vp9'
};
recorder = new MediaRecorder(stream, options);
recorder.ondataavailable = function(e) {
var url = URL.createObjectURL(e.data);
var preview = document.createElement('video');
preview.controls = true;
preview.src = url;
document.body.appendChild(preview);
reader = new FileReader();
reader.readAsDataURL(e.data);
reader.onloadend = function() {
base64data = reader.result;
}
};
recorder.start();
};
recordButton.innerText = "Recording... press to stop";
navigator.mediaDevices.getUserMedia({video: true}).then(handleSuccess);
function toggleRecording() {
if (recorder && recorder.state == "recording") {
recorder.stop();
videoStream.getVideoTracks()[0].stop();
recordButton.innerText = "Saving the recording... Please wait!"
}
}
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
var data = new Promise(resolve=>{
recordButton.onclick = ()=>{
toggleRecording()
sleep(2000).then(() => {
// wait 2000ms for the data to be available
resolve(base64data.toString())
});
}
});
</script>
"""
def start_webcam():
    """Open a live webcam preview element in the notebook output.

    Injects JavaScript that requests camera access via getUserMedia,
    attaches the stream to a <video> element appended to the page, and
    resizes the Colab output iframe to fit. The preview is purely a side
    effect; nothing is returned.
    """
    js = Javascript('''
async function startWebcam() {
const div = document.createElement('div');
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
return;
}
''')
    display(js)
    # startWebcam() resolves to undefined; the original `data = eval_js(...)`
    # bound an unused local, so the binding is dropped (the call must remain
    # for its side effect of actually starting the preview).
    eval_js('startWebcam()')


start_webcam()
def get_video():
    """Show the recorder widget and block until a clip is captured.

    Displays VIDEO_HTML in the notebook output, waits on the JS promise
    `data` (a base64 data-URL of the recorded webm clip), strips the
    'data:...;base64,' prefix, and returns the decoded video bytes.
    """
    display(HTML(VIDEO_HTML))
    data_url = eval_js("data")
    # Everything after the first comma is the base64 payload.
    return b64decode(data_url.split(',')[1])
# Record a clip via the widget and persist the raw bytes to disk.
videofile = get_video()
with open(video_file_test, 'wb') as f:
    f.write(videofile)
import cv2
from google.colab.patches import cv2_imshow  # cv2.imshow replacement for Colab

# Haar cascade classifiers loaded from the OpenCV repo cloned at the top of
# this script: frontal face, eyes, and upper body.
face_cascade = cv2.CascadeClassifier('/content/opencv/data/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/content/opencv/data/haarcascades/haarcascade_eye.xml')
upper_cascade = cv2.CascadeClassifier('/content/opencv/data/haarcascades/haarcascade_upperbody.xml')

# Open the recorded clip for frame-by-frame processing.
cap = cv2.VideoCapture(video_file_test)
count = 1  # frame counter consulted by the loop below
# Process up to 30 frames of the recorded clip: draw rectangles around
# detected upper bodies, faces, and (within each face) eyes, and display
# each annotated frame inline.
#
# Fixes vs. the original:
#   * `cap.isOpened() & count<30` parsed as `(cap.isOpened() & count) < 30`
#     because `&` binds tighter than `<`; replaced with logical `and`.
#   * `count` was never incremented, so the frame cap never triggered.
#   * `ret` was never checked, so at end-of-stream `img` is None and
#     detectMultiScale raises; we now break cleanly instead.
while cap.isOpened() and count < 30:
    ret, img = cap.read()
    if not ret:
        # End of stream (or read failure) — stop processing.
        break
    # scaleFactor=1.3, minNeighbors=5; the cascades accept the BGR frame
    # directly (OpenCV converts internally).
    faces = face_cascade.detectMultiScale(img, 1.3, 5)
    uppers = upper_cascade.detectMultiScale(img, 1.3, 5)
    for (ux, uy, uw, uh) in uppers:
        cv2.rectangle(img, (ux, uy), (ux + uw, uy + uh), (255, 255, 0), 2)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
        # Search for eyes only inside the detected face region; rectangles
        # drawn on this view also appear in `img` (shared memory).
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_color)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 127, 255), 2)
    cv2_imshow(img)
    count += 1  # advance the frame counter so the <30 cap takes effect
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC pressed — abort early
        break
cap.release()
cv2.destroyAllWindows()