Merge XMega code
jpanikulam committed Oct 30, 2014
2 parents 2d33104 + eaf38ca commit 4103b42
Showing 19 changed files with 570 additions and 175 deletions.
7 changes: 5 additions & 2 deletions README.md
@@ -11,9 +11,12 @@ All of this is written for Ubuntu 14.04 (Trusty Tahr) and ROS Indigo (ros.org/in
First, follow [this tutorial](https://help.github.com/articles/generating-ssh-keys/) for setting up git. Then follow the first three ROS tutorials, and make your catkin workspace.

Once you make your catkin workspace (the ROS tutorials will teach you how to do this), go to the src folder, and clone this repository
```cd ~/catkin_ws/src

git clone git@github.com:ufieeehw/IEEE2015.git```
```cd ~/catkin_ws/src```

```git clone git@github.com:ufieeehw/IEEE2015.git```

```cd ~/catkin_ws && catkin_make```

Your directory structure should look like "~/catkin_ws/src/IEEE2015/ros/"

187 changes: 123 additions & 64 deletions python/SLAM/VisualCortex.py
@@ -3,7 +3,8 @@
from numpy.linalg import inv
import cv2


# TODO: Make nonchanging variables properties. Make changing variables
# non-properties
class VisualCortex:


@@ -26,7 +27,6 @@ class VisualCortex:
bird_y = None;

# Some useful private variables
_affine_matrix = None;
_map_features = None;
_view_features = None;
_transformed_view = None;
@@ -36,6 +36,7 @@ class VisualCortex:
[800],
[1]
];
_perspec_corners = None


####################################
@@ -62,15 +63,17 @@ def __init__(self, view_coordinates, map_coordinates, img):
self._initialize_cam_dimensions(img)

# Get the perspec transform matrix
# TODO: Get a more accurate transform
self._perspective_matrix = self._get_perspective_matrix(
view_coordinates, map_coordinates);
# Now that we have a matrix, get the bird's eye image dimensions
self._get_bird_dims()
self._calculate_perspec_corners()
# Transform the initial image to bird's eye
imgx = self.transform_image(img)
# And paste it on to the full_map using a basic translation matrix
self._initialize_affine_matrix()
self.stitch(imgx)
affine_matrix = self._initialize_affine_matrix()
self.stitch(imgx, affine_matrix)


####################################
@@ -85,19 +88,25 @@ def SLAM(self, image):
imgx = self.transform_image(image)
# Extract features from this image
kp1, des1 = self.feature_detect(imgx)
kp1, des1 = self._apply_roi(kp1, des1)
self._draw_features(imgx,kp1, None)
kp1, des1, bird_corners = self._apply_roi(kp1, des1)
self._draw_features(imgx,kp1, bird_corners)
# TODO: Use a database rather than re-ORBing on the full map
# Extract features from the full_map
kp2, des2 = self.feature_detect(self.full_map)
# Match features between imgx and full_map
# TODO: Do we need to input kp1 and kp2?
M, matches = self.feature_match(kp1, kp2, des1, des2)
self._draw_matches(imgx,self.full_map,kp1,kp2,matches,[0,1,2,3])
# Use matches to get the affine transform
self.get_affine_matrix(kp1, kp2, matches)
# TODO: Make affine matrix a variable and not a property, since
# it always changes at this point
affine_matrix = self.get_affine_matrix(kp1, kp2, matches)
# Use this affine matrix to update robot position
self.localize()
# TODO: Make position/rotation a property
self.localize(affine_matrix)
# Use this affine matrix to update full_map
self.stitch(imgx)
self.stitch(imgx, affine_matrix)
return;

"""Takes an image from the camera's video feed and transforms it into a
@@ -139,38 +148,39 @@ def feature_match(self, kp1, kp2, des1, des2):
matches = sorted(matches, key = lambda x: x.distance)

# Optionally store all the good matches as per Lowe's ratio test.
good = []
good_matches = []
for m in matches:
#if m.distance < 0.7*n.distance:
good.append(m)
good_matches.append(m)

# Make sure we found matches, and then use the matches objects to
# extract lists of points that correspond to the matches and
# determine the mask array based on locations. Use this mask to
# filter the full list of matches
if len(good)>0:
if len(good_matches)>0:
# TODO: 'splain the following 4 lines of code
query_pts = np.float32(
[ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
[ kp1[m.queryIdx].pt for m in good_matches ]).reshape(-1,1,2)
train_pts = np.float32(
[ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
[ kp2[m.trainIdx].pt for m in good_matches ]).reshape(-1,1,2)
# Run regression on the matches to filter outliers
M, mask = cv2.findHomography(query_pts, train_pts, cv2.RANSAC,20.0)
matches_mask = mask.ravel().tolist()
# Use this mask immediately to get the good matches
good = self._filter_matches(good, matches_mask)
# Use this mask immediately to filter down to the good matches
good_matches = self._filter_matches(good_matches, matches_mask)
else:
print "Not enough matches are found - %d/%d" % (len(good), 10)
print "Not enough matches are found - %d/%d" % (len(good_matches), 10)
matches_mask = None
M = None
return M, good
return M, good_matches
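
For reference, a minimal stand-alone sketch of the same ORB + brute-force matching and RANSAC filtering flow. The file names are hypothetical and the constructor check covers both the OpenCV 2.4 and 3.x APIs:

```
import cv2
import numpy as np

# Sketch only: detect ORB features in two grayscale images, brute-force match
# them with the Hamming norm, then keep the RANSAC inliers from findHomography.
orb = cv2.ORB() if hasattr(cv2, "ORB") else cv2.ORB_create()
img_a = cv2.imread("view.jpg", 0)   # hypothetical file names
img_b = cv2.imread("map.jpg", 0)
kp1, des1 = orb.detectAndCompute(img_a, None)
kp2, des2 = orb.detectAndCompute(img_b, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

if len(matches) >= 4:
    query_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    train_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(query_pts, train_pts, cv2.RANSAC, 20.0)
    inliers = [m for m, keep in zip(matches, mask.ravel().tolist()) if keep]
```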

"""Using the matches and mask between the full_map and perspec transformed
image, determine the affine transformation that will map the smaller
onto the bigger.
"""
def get_affine_matrix(self, kp1, kp2, matches):
# Indices of which points in "matches" to use
point1 = 0;
point1 = 3;
point2 = 1;
point3 = 2;
# Gather the coordinates in train img that we go TO
@@ -186,26 +196,44 @@ def get_affine_matrix(self, kp1, kp2, matches):
]
)
# Transform the query image
self._affine_matrix = cv2.getAffineTransform(queryPts,trainPts);
return None;
return cv2.getAffineTransform(queryPts,trainPts);
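
As a quick illustration, cv2.getAffineTransform takes exactly three source/destination point pairs and returns a 2x3 matrix; the points below are made up:

```
import cv2
import numpy as np

# Three source points and where they should land (illustrative values)
query_pts = np.float32([[0, 0], [100, 0], [0, 100]])
train_pts = np.float32([[250, 300], [350, 305], [245, 398]])
A = cv2.getAffineTransform(query_pts, train_pts)   # 2x3 affine matrix

# Applying it to a homogeneous point gives that point's location on the map
pt = np.array([[50.0], [50.0], [1.0]])
mapped = np.dot(A, pt)                              # 2x1 result
```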

"""Apply the latest _affine_matrix to the _robot_coordinates so that we
can update the robot's location in the full_map
"""
def localize(self):
self.position = np.dot(self._affine_matrix, self._robot_coordinates)
# TODO: Figure out how to extract rotation from the affine matrix
self.rotation = 0;

def localize(self, affine_matrix):
"""Apply the latest _affine_matrix to the _robot_coordinates so that we
can update the robot's location in the full_map
"""
self.position = np.dot(affine_matrix, self._robot_coordinates)
bearing = np.dot(affine_matrix, [[self._robot_coordinates[0][0]],
[self._robot_coordinates[1][0] - self.bird_y/2],
[1]
])
# Note that delta_y seems flipped. This is because x coordinates go left to right (normal)
# but y coordinates go top to bottom (flipped). As Bill Nye would say, consider the following:
# O (bigger x, smaller y)
# | /
# | /
# | /
# |ang /
# | /
# | /
# | / (smaller x, bigger y)
# O
delta_x = bearing[0, 0] - self.position[0, 0]
delta_y = self.position[1, 0] - bearing[1, 0]
# Gives rotation in radians East of North
self.rotation = np.arctan2(delta_x, delta_y)
return None;
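
A small worked sketch of the heading computation above, using an illustrative affine matrix (a 30 degree rotation plus a translation); np.arctan2 avoids the divide-by-zero that occurs when the robot faces straight ahead:

```
import numpy as np

# Illustrative 2x3 affine matrix: a 30 degree rotation (in image coordinates,
# where y grows downward) plus a translation
theta = np.radians(30)
A = np.float32([[np.cos(theta), -np.sin(theta), 120],
                [np.sin(theta),  np.cos(theta),  80]])

robot = np.array([[400.0], [800.0], [1.0]])   # robot position, homogeneous
ahead = np.array([[400.0], [700.0], [1.0]])   # a point straight ahead of the robot

p = np.dot(A, robot)
b = np.dot(A, ahead)
delta_x = b[0, 0] - p[0, 0]             # east component of the forward vector
delta_y = p[1, 0] - b[1, 0]             # north component (y axis is flipped)
heading = np.arctan2(delta_x, delta_y)  # ~0.52 rad, i.e. 30 degrees East of North
```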

""" Update the full_map with the latest image, using the
most up-to-date _affine_matrix"""
def stitch(self, imgx):
def stitch(self, imgx, affine_matrix):
# Transform imgx into something the size of the full map using affine
rows, cols = self.full_map.shape
fullimgx = cv2.warpAffine(imgx, self._affine_matrix, (cols,rows))
fullimgx = cv2.warpAffine(imgx, affine_matrix, (cols,rows))
fullimg2 = self.full_map
# Add the images together
# TODO: Use "blip" or something to add the images together
self.full_map = fullimgx/2 + self.full_map/2;
# Clean up the "ghosting" on the resulting image
dump, self.full_map = cv2.threshold(self.full_map, 50, 255,
@@ -218,6 +246,39 @@ def stitch(self, imgx):
##### 4. PRIVATE METHODS #####
####################################

# Put green dots on each of the features
def _draw_features(self, image, kp, bird_corners):
# Turn this image into color so we see green dots
color_img = cv2.cvtColor(image , cv2.COLOR_GRAY2RGB)
# Plot the keypoints
for i in range(0,len(kp)):
# Extract coordinates of these keypoints
feature1 = kp[i].pt;
# Convert the feature coordinates to an integer tuple
feature1 = tuple([int(feature1[0]), int(feature1[1])]);
# Put dots on these features
cv2.circle(color_img, feature1, 1, (0,255,0), -1);

if bird_corners is not None:
cv2.line(color_img, tuple([int(bird_corners[0][0]),int(bird_corners[1][0])]),
tuple([int(bird_corners[0][1]),int(bird_corners[1][1])]),
(255,0,0))
cv2.line(color_img, tuple([int(bird_corners[0][1]),int(bird_corners[1][1])]),
tuple([int(bird_corners[0][2]),int(bird_corners[1][2])]),
(255,0,0))
cv2.line(color_img, tuple([int(bird_corners[0][2]),int(bird_corners[1][2])]),
tuple([int(bird_corners[0][3]),int(bird_corners[1][3])]),
(255,0,0))
cv2.line(color_img, tuple([int(bird_corners[0][3]),int(bird_corners[1][3])]),
tuple([int(bird_corners[0][0]),int(bird_corners[1][0])]),
(255,0,0))
while(1):
cv2.imshow('full',color_img);

if cv2.waitKey(20) & 0xFF == 27:
break;
cv2.destroyAllWindows()

# Read off the dimensions of an image
def _initialize_cam_dimensions(self, image):
self.cam_y, self.cam_x = image.shape
@@ -242,13 +303,13 @@ def _tune_output_square(self, scale,translate,points):
newpoints[n][1] = avg_y - (avg_y - points[n][1])*scale + translate[1]
return newpoints

# TODO: Or do we want to initialize this with the self.position
def _initialize_affine_matrix(self):
self._affine_matrix = np.float32([[1, 0, (self.full_map_x-self.bird_x)/2],
return np.float32([[1, 0, (self.full_map_x-self.bird_x)/2],
[0, 1, (self.full_map_y-self.bird_y)/2]
]
)
return;


# Figure out the dimensions of the image that the perspective transform
# maps to.
def _get_bird_dims(self):
@@ -270,20 +331,25 @@ def _get_bird_dims(self):
self.bird_y = corner[1] / corner[2]
return

# Applies a range-of-interest mask over the kp and des because
# we don't want the corners of the transformed trapezoid to show
# up as features
def _apply_roi(self,kp,des):
# Created this method in response to the TODO comment
def _calculate_perspec_corners(self):
# TODO: These coordinates are static, so we don't need to keep
# recalculating every time we run _apply_roi
# Figure out where the corners of the perspective image map to in the
# bird's eye image
c = 5 # Number of pixels to cushion the border with
perspec_corners = [[c, self.cam_x - c, self.cam_x - c, c ],
[c, c , self.cam_y - c, self.cam_y - c],
[1, 1 , 1 , 1 ]
]
bird_homo_corners = np.dot(self._perspective_matrix, perspec_corners)
c = 10 # Number of pixels to cushion the border with
self._perspec_corners = [[c, self.cam_x - c, self.cam_x - c, c ],
[c, c , self.cam_y - c, self.cam_y - c],
[1, 1 , 1 , 1 ]
]
return
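
For reference, a minimal sketch of pushing image corners through a perspective (homography) matrix and normalizing by the third homogeneous coordinate, as _get_bird_dims and _apply_roi do; all numbers here are illustrative:

```
import cv2
import numpy as np

# Illustrative view->map correspondences; the real ones come from calibration
view_pts = np.float32([[100, 300], [540, 300], [620, 460], [20, 460]])
map_pts = np.float32([[200, 200], [440, 200], [440, 440], [200, 440]])
H = cv2.getPerspectiveTransform(view_pts, map_pts)   # 3x3 homography

# Corners of the camera image in homogeneous coordinates (640x480 assumed)
corners = np.array([[0, 640, 640,   0],
                    [0,   0, 480, 480],
                    [1,   1,   1,   1]], dtype=np.float64)
homo = np.dot(H, corners)
bird_corners = homo[:2] / homo[2]    # divide the x and y rows by the scale row
```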


# Applies a range-of-interest mask over the kp and des because
# we don't want the corners of the transformed trapezoid to show
# up as features
def _apply_roi(self,kp,des):
bird_homo_corners = np.dot(self._perspective_matrix, self._perspec_corners)
# Initialize the output corners
bird_corners = [[1, 1, 1, 1],
[1, 1, 1, 1]]
@@ -297,7 +363,7 @@ def _apply_roi(self,kp,des):
# Check each kp to see if it is in the polygon defined by bird_corners
# by checking if its x-position falls between the diagonal lines
# and its y-position is between the horizontal lines
include = []
mask = []
for i in range(0,len(kp)):
keep = 0
if ((kp[i].pt[1] > bird_corners[1][0]) and (kp[i].pt[1] < bird_corners[1][2])):
@@ -310,18 +376,17 @@ def _apply_roi(self,kp,des):
right_x = bird_corners[0][2] + delta
if (kp[i].pt[0] < right_x):
keep = 1;
if (keep == 1):
include.append(i)

kp1 = []
des1 = []

for i in include:
kp1.append(kp[i])
des1.append(des[i])
mask.append(keep);

# Filter out the points outside the mask
kp_with_mask = zip(kp,mask)
des_with_mask = zip(des,mask)
kp1 = [item[0] for item in kp_with_mask if item[1] == 1]
des1 = [item[0] for item in des_with_mask if item[1] == 1]
# Convert des1 to 2d array to make opencv happy
des1 = np.array(des1)

return kp1, des1
return kp1, des1, bird_corners
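
An alternative sketch of the same region-of-interest filtering using cv2.pointPolygonTest, with made-up corners and points (the hand-rolled test above exploits the fact that the polygon is a known trapezoid):

```
import cv2
import numpy as np

# Made-up ROI polygon corners (x, y) and keypoint-like (x, y) positions
roi = np.array([[50, 40], [590, 40], [630, 430], [10, 430]], dtype=np.float32)
points = [(320.0, 240.0), (5.0, 5.0), (600.0, 400.0)]

# Keep only the points that fall on or inside the polygon
inside = [pt for pt in points
          if cv2.pointPolygonTest(roi, pt, False) >= 0]
```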

# Useful tool for drawing matches between two images
def _draw_matches(self, img1, img2, kp1, kp2, matches, numberofpoints):
@@ -365,15 +430,9 @@ def _draw_matches(self, img1, img2, kp1, kp2, matches, numberofpoints):

# Given the inlier mask generated by RANSAC (findHomography), use it to
# filter the outliers out of the matches list
def _filter_matches(self, good, matches_mask):
removeAt = [];
count = 0;
for i in matches_mask:
if i == 0:
removeAt.append(count)
count = count + 1
for i in reversed(removeAt):
good.pop(i)
def _filter_matches(self, matches, matches_mask):
matches_with_mask = zip(matches,matches_mask)
good = [item[0] for item in matches_with_mask if item[1] == 1]
return good
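
A tiny usage example of this mask-based filtering, with placeholder data:

```
# Placeholder matches and mask; real inputs come from feature_match
matches = ['m0', 'm1', 'm2', 'm3']
matches_mask = [1, 0, 1, 0]
good = [m for m, keep in zip(matches, matches_mask) if keep == 1]
# good == ['m0', 'm2']
```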


@@ -388,7 +447,7 @@ def _filter_matches(self, good, matches_mask):

# Set filename and read it into an opencv object
img_location = 'cap1.jpg'
img = cv2.cvtColor(cv2.imread(img_location) , cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(cv2.imread(img_location), cv2.COLOR_BGR2GRAY)
# Create a new VC object
VC = VisualCortex(view_coordinates,map_coordinates,img);

@@ -402,4 +461,4 @@

if cv2.waitKey(20) & 0xFF == 27:
break;
cv2.destroyAllWindows()
cv2.destroyAllWindows()
8 changes: 4 additions & 4 deletions ros/ieee2015_robot/urdf/IEEEurdf.URDF
@@ -534,7 +534,7 @@

<!-- Camera -->
<joint name="camera_joint" type="fixed">
<origin xyz="0 -.15 -.015" rpy="0 0 -1.57"/>
<origin xyz="0 -.15 .020" rpy="0 .587 -1.57"/>
<parent link="base_link"/>
<child link="camera_link"/>
<axis xyz="1 0 0" />
@@ -566,10 +566,10 @@
<sensor type="camera" name="camera1">
<update_rate>30.0</update_rate>
<camera name="head">
<horizontal_fov>1.3962634</horizontal_fov>
<horizontal_fov>1.36</horizontal_fov>
<image>
<width>800</width>
<height>800</height>
<width>640</width>
<height>360</height>
<format>R8G8B8</format>
</image>
<clip>