-
Notifications
You must be signed in to change notification settings - Fork 0
/
test_calib.py
236 lines (175 loc) · 8.23 KB
/
test_calib.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
import numpy as np
import cv2, PIL
from cv2 import aruco
import json
import time
import pyglet
from main import read_json
"""
This script is a test for the calibration of the webcam and the projector,
performed with autoCalib.py.
You must:
1. To print and cut one of the markers in the 'test_markers.pdf' file.
2. Complete the resolution and camera ID values in the 'configs.json' file.
3. Set the other values to 0 in the 'configs.json' file.
Pyglet seems to has a bug in that it displays the retina screen of macOS inverted.
--
This script works with OpenCV 4.7, not with all the examples that are giving back.
"""
def transform_coords(coord_np, matrix_array):
    """
    Apply a perspective transformation to a set of 2D points.

    Args:
        coord_np (numpy.ndarray): Array of points, one point per row.
        matrix_array (numpy.ndarray): 3x3 perspective transformation matrix.

    Returns:
        list: The transformed points as a plain Python list.
    """
    # cv2.perspectiveTransform requires float input with an extra axis,
    # i.e. shape (N, 1, 2) instead of (N, 2), and the same depth as the matrix.
    points = np.expand_dims(coord_np.astype(np.float32), axis=1)
    transformed = cv2.perspectiveTransform(points, matrix_array)
    # Drop the helper axis again and hand back a regular list.
    return np.squeeze(transformed, axis=1).tolist()
def update(dt):
    """
    Update state for each video frame and build the marker polygon.

    Grabs a frame from the global `cap` device, saves it to 'capture.jpg'
    (the capture window's on_draw blits that file), then looks for an ArUco
    marker. If one is detected, returns an opaque polygon over the marker's
    corners transformed into projector coordinates; otherwise returns a
    fully transparent placeholder polygon. Also refreshes the global
    `message` label with the detection status.

    Args:
        dt (float): Time since the last update; unused, but supplied by
            the pyglet clock scheduler.

    Returns:
        pyglet.shapes.Polygon: The polygon to be drawn on the window.

    Issue:
        Initially a sprite texture was used but it leaked memory:
            frame_pg = pyglet.image.ImageData(frame.shape[1], frame.shape[0], 'BGR', frame.tobytes())
            frame_sprite = pyglet.sprite.Sprite(frame_pg)
        As a quick-and-dirty fix the frame is saved as a jpg file and drawn
        with .blit; OpenGL shaders would be the clean solution.
    """
    global message
    # Transparent placeholder polygon, returned when no marker is detected.
    poly = pyglet.shapes.Polygon([400, 100], [500, 10], [600, 100], [550, 175], [450, 150],
                                 color=(255, 255, 255, 0))
    ret, frame = cap.read()
    if ret:
        # BUG FIX: was 'captura.jpg', but on_draw blits 'capture.jpg',
        # so the capture window never refreshed past the first frame.
        cv2.imwrite('capture.jpg', frame)
        corners = busca_marcador(frame)
        txt_message = "NO SE DETECTARON MARCADORES"
        if corners.size > 0:  # use .size because it is a numpy array
            corners_ls = transform_coords(corners, array_matrix)
            poly = pyglet.shapes.Polygon(*corners_ls, color=(255, 255, 255, 255))  # * unpacks the list
            # BUG FIX: previously the "no markers detected" text was shown
            # even when a marker had just been found.
            txt_message = "MARCADOR DETECTADO"
        message = pyglet.text.Label(txt_message,
                                    font_name='Arial',
                                    font_size=14,
                                    x=window_capture.width // 2, y=window_capture.height - 30,
                                    anchor_x='center', anchor_y='center')
    return poly
def on_key_press(symbol, modifiers):
    """Exit the pyglet application when the ESC key is pressed."""
    if symbol != pyglet.window.key.ESCAPE:
        return
    pyglet.app.exit()
def busca_marcador(image):
    """
    Search for ArUco markers (DICT_4X4_50) in the given image.

    If at least one marker is found, returns an int numpy array with the
    corner coordinates (squeezed). If no markers are found, returns an
    empty int numpy array.
    """
    dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
    detector = aruco.ArucoDetector(dictionary, aruco.DetectorParameters())
    detected, ids, rejected = detector.detectMarkers(image)
    result = np.array([]).astype(int)
    # Non-empty tuple/list means at least one marker was detected.
    if len(detected) > 0:
        result = np.array(detected).astype(int).squeeze()
    return result
# --------- Main ---------------------------------------
if __name__ == '__main__':
    # Read the JSON configuration file
    projector_res_w, projector_res_h, camera_res_w, camera_res_h, camera_id, \
        maker_width, padding, matriz = read_json('configs.json')
    # Reshape the flat matrix from the JSON file into the 3x3 form
    # required by cv2.perspectiveTransform.
    array_matrix = np.array(matriz, dtype=np.float32).reshape(3, 3)
    # Get a list of all available screens
    screens = pyglet.canvas.Display().get_screens()
    print(screens)
    # BUG FIX: indexing screens[1] unconditionally raised IndexError on
    # single-display machines; fall back to the primary screen.
    projector_screen = screens[1] if len(screens) > 1 else screens[0]
    # If the projector resolution is not specified, take it from the projector screen
    if projector_res_w == 0 or projector_res_h == 0:
        projector_res_w = projector_screen.width
        projector_res_h = projector_screen.height
        print(projector_res_w)
    if maker_width == 0:
        maker_width = int(projector_res_w * .1)
        print(maker_width)
    if padding == 0:
        padding = int(maker_width * .2)
    # Configure 2 windows: one for the projector and the other for the camera capture
    window_capture = pyglet.window.Window(camera_res_w, camera_res_h,
                                          screen=screens[0], caption='Captura RAW')
    window_projector = pyglet.window.Window(projector_screen.width, projector_screen.height,
                                            fullscreen=False,  # switch to True in production
                                            resizable=True,
                                            screen=projector_screen,
                                            caption='Plantilla')
    # Place the projector window on the projector screen
    window_projector.set_location(projector_screen.x, projector_screen.y)
    # Open the camera (device id from the JSON; 0 if there is one camera).
    # BUG FIX: cv2.VideoCapture does not raise when the device is missing,
    # so the previous try/except never triggered; check isOpened() instead.
    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_res_w)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_res_h)
    time.sleep(2)  # give the camera time to warm up
    if cap.isOpened():
        print("-- CAMERA FOUND --- ")
    else:
        print("-- CAN'T FOUND THE CAMERA -------")
        # BUG FIX: was quit() (interactive-use builtin); raise SystemExit
        # for a proper script exit.
        raise SystemExit(1)
    # Load the initial frame
    ret, frame = cap.read()
    if ret:
        # That's the 'dirty' part, see documentation of the 'update' function
        cv2.imwrite('capture.jpg', frame)
    # Schedule the event that refreshes the camera window at ~30 fps
    # (schedule_interval returns None, so nothing to keep).
    pyglet.clock.schedule_interval(update, 1 / 30.0)
    # Rectangle/line to be updated later with the marker position.
    batch_mark = pyglet.graphics.Batch()
    line2 = pyglet.shapes.Line(150, 150, 444, 111, width=4, color=(200, 20, 20), batch=batch_mark)
    # Place the background image in the projector window,
    # scaled to cover the full projector resolution and centred.
    background = pyglet.image.load('background.jpg')
    sprite_background = pyglet.sprite.Sprite(background)
    sprite_background.scale = max(projector_res_w / sprite_background.width,
                                  projector_res_h / sprite_background.height)
    sprite_background.position = ((projector_res_w - sprite_background.width) / 2,
                                  (projector_res_h - sprite_background.height) / 2,
                                  0)
    # Install the key event handler on both windows.
    # '.on_key_press' is not assigned directly because it binds to a single window.
    window_capture.push_handlers(on_key_press)
    window_projector.push_handlers(on_key_press)
    # Create the label that shows the initial message
    message = pyglet.text.Label("CAN'T FIND THE ArUCO MARKER",
                                font_name='Arial',
                                font_size=14,
                                x=window_capture.width // 2, y=window_capture.height - 30,
                                anchor_x='center', anchor_y='center')

    # Draw handler for the camera capture window
    @window_capture.event
    def on_draw():
        window_capture.clear()
        # frame_sprite.draw()  # sprite path disabled, see update() docstring
        image = pyglet.image.load('capture.jpg')
        image.blit(0, 0)
        message.draw()
        window_projector.flip()  # workaround for the macOS retina bug

    # Draw handler for the projector window
    @window_projector.event
    def on_draw():
        window_projector.clear()
        sprite_background.draw()
        linea1 = update(0)
        linea1.draw()

    # Run the application
    pyglet.app.run()
    cap.release()