edge_detector.py

import argparse

import cv2 as cv
import numpy as np

parser = argparse.ArgumentParser(
    description='This sample shows how to define custom OpenCV deep learning layers in Python. '
                'Holistically-Nested Edge Detection (https://arxiv.org/abs/1504.06375) neural network '
                'is used as an example model. Find a pre-trained model at https://github.com/s9xie/hed.')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--prototxt', help='Path to deploy.prototxt', required=True)
parser.add_argument('--caffemodel', help='Path to hed_pretrained_bsds.caffemodel', required=True)
parser.add_argument('--width', help='Resize input image to a specific width', default=256, type=int)
parser.add_argument('--height', help='Resize input image to a specific height', default=256, type=int)
parser.add_argument('--savefile', help='Specifies the output video path', default='output.mp4', type=str)
args = parser.parse_args()
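
# Example invocation (the file names below are placeholders; point them at the
# deploy.prototxt and hed_pretrained_bsds.caffemodel downloaded from
# https://github.com/s9xie/hed):
#   python edge_detector.py --prototxt deploy.prototxt \
#       --caffemodel hed_pretrained_bsds.caffemodel --input example.jpg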


class CropLayer(object):
    def __init__(self, params, blobs):
        self.xstart = 0
        self.xend = 0
        self.ystart = 0
        self.yend = 0

    # Our layer receives two inputs. We need to crop the first input blob
    # to match the shape of the second one (keeping batch size and number of channels).
    def getMemoryShapes(self, inputs):
        inputShape, targetShape = inputs[0], inputs[1]
        batchSize, numChannels = inputShape[0], inputShape[1]
        height, width = targetShape[2], targetShape[3]
        self.ystart = int((inputShape[2] - targetShape[2]) / 2)
        self.xstart = int((inputShape[3] - targetShape[3]) / 2)
        self.yend = self.ystart + height
        self.xend = self.xstart + width
        return [[batchSize, numChannels, height, width]]

    def forward(self, inputs):
        return [inputs[0][:, :, self.ystart:self.yend, self.xstart:self.xend]]
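
# Worked example of the centre-crop arithmetic above (illustrative numbers only):
# if the first input blob has shape (1, 1, 260, 260) and the target blob has shape
# (1, 1, 256, 256), then ystart = xstart = (260 - 256) // 2 = 2 and
# yend = xend = 2 + 256 = 258, so forward() returns a (1, 1, 256, 256) slice.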

# Register the custom Crop layer so the importer can map layers of type 'Crop'
# to the Python implementation above (same ordering as the OpenCV HED sample).
cv.dnn_registerLayer('Crop', CropLayer)

# Load the model.
net = cv.dnn.readNetFromCaffe(args.prototxt, args.caffemodel)

image = cv.imread(args.input)
if image is None:
    raise SystemExit('Could not read the input image: %s' % args.input)
image = cv.resize(image, (args.width, args.height))

# Mean values are the per-channel BGR means used by the original HED Caffe model.
inp = cv.dnn.blobFromImage(image, scalefactor=1.0, size=(args.width, args.height),
                           mean=(104.00698793, 116.66876762, 122.67891434),
                           swapRB=False, crop=False)
net.setInput(inp)

# edges = cv.Canny(image, image.shape[1], image.shape[0])
out = net.forward()

# The network outputs a single-channel edge probability map in [0, 1];
# drop the batch and channel dimensions and resize it back to the image size.
out = out[0, 0]
out = cv.resize(out, (image.shape[1], image.shape[0]))
print(out.shape)

# Convert to a 3-channel 8-bit image so it can sit next to the input image.
out = cv.cvtColor(out, cv.COLOR_GRAY2BGR)
out = 255 * out
out = out.astype(np.uint8)
print(type(out))
print(np.max(out))
print(np.min(out))
print(out.shape)
print(image.shape)

# Write the input image and the edge map side by side.
con = np.concatenate((image, out), axis=1)
cv.imwrite('out.jpg', con)
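

# The argument parser above also advertises camera/video input and a --savefile output
# path, but this script only processes a single image. The helper below is a minimal,
# optional sketch of how the same network and preprocessing could be applied frame by
# frame; the function name, the 25 fps rate and the 'mp4v' codec are assumptions, not
# part of the original sample.
def process_video(net, source, savefile, width, height):
    cap = cv.VideoCapture(source if source else 0)  # fall back to the default camera
    writer = None
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frame = cv.resize(frame, (width, height))
        blob = cv.dnn.blobFromImage(frame, scalefactor=1.0, size=(width, height),
                                    mean=(104.00698793, 116.66876762, 122.67891434),
                                    swapRB=False, crop=False)
        net.setInput(blob)
        edges = net.forward()[0, 0]
        edges = (255 * cv.cvtColor(edges, cv.COLOR_GRAY2BGR)).astype(np.uint8)
        if writer is None:
            writer = cv.VideoWriter(savefile, cv.VideoWriter_fourcc(*'mp4v'),
                                    25.0, (width, height))
        writer.write(edges)
    cap.release()
    if writer is not None:
        writer.release()

# Possible usage with the arguments already defined above:
#   process_video(net, args.input, args.savefile, args.width, args.height)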