diff --git a/command/sub8_missions/sub8_missions/arm_torpedos.py b/command/sub8_missions/sub8_missions/arm_torpedos.py new file mode 100644 index 00000000..ae8fd4a5 --- /dev/null +++ b/command/sub8_missions/sub8_missions/arm_torpedos.py @@ -0,0 +1,216 @@ +from txros import util +import tf +import rospy +import numpy as np +from mil_ros_tools import rosmsg_to_numpy +import visualization_msgs.msg as visualization_msgs +from visualization_msgs.msg import Marker, MarkerArray +from geometry_msgs.msg import Point, Vector3 +from mil_misc_tools import FprintFactory +from .sub_singleton import SubjuGator + +MISSION = 'Torpedo Challenge' + + +class Target(object): + + def __init__(self): + self.position = None + self.destroyed = False + + def set_destroyed(self): + self.destroyed = True + + def update_position(self, pos): + self.position = pos + + +class FireTorpedos(SubjuGator): + ''' + Mission to solve the torpedo RoboSub challenge. + + This code was based off of the Buoy mission code written by Kevin Allen. + Its goal is to search for a target on the torpedo board and fire at it. 
+ ''' + TIMEOUT_SECONDS = 15 + Z_PATTERN_RADIUS = 1 + X_PATTERN_RADIUS = 1.0 + X_OFFSET = .2 + Z_OFFSET = .2 + BACKUP_METERS = 3.0 + BLIND = True + def __init__(self): + self.print_info = FprintFactory(title=MISSION).fprint + self.print_bad = FprintFactory(title=MISSION, msg_color="red").fprint + self.print_good = FprintFactory( + title=MISSION, msg_color="green").fprint + + # B = bottom; T = Top; L = left; R = right; C = center; O = unblocked; + # X = blocked; + self.targets = { + 'TCX': Target(), + 'TRX': Target(), + 'TLX': Target(), + 'BCO': Target() + } + self.pattern_done = False + self.done = False + self.ltime = None + + def generate_pattern(self): + z = self.Z_PATTERN_RADIUS + X = self.X_PATTERN_RADIUS + self.moves = [[0, X, -z], [0, -X, 0], [0, X, z], [0, -X, 0]] + self.move_index = 0 + + @util.cancellableInlineCallbacks + def search(self): + global markers + markers = MarkerArray() + pub_markers = yield self.nh.advertise('/torpedo/rays', MarkerArray) + while True: + info = 'CURRENT TARGETS: ' + + target = 'BCO' + pub_markers.publish(markers) + ''' + In the event we want to attempt other targets beyond bare minimum + for target in self.targets. Eventually we will want to take the + best target, which is the TCX target. Priority targetting will + come once we confirm we can actually unblock currently blocked + targets. 
+ ''' + print("REQUESTING POSE") + res = yield self.vision_proxies.xyz_points.get_pose( + target='board') + if res.found: + print("POSE FOUND!") + self.ltime = res.pose.header.stamp + self.targets[target].update_position( + rosmsg_to_numpy(res.pose.pose.position)) + self.normal = rosmsg_to_numpy(res.pose.pose.orientation)[:3] + marker = Marker( + ns='torp_board', + action=visualization_msgs.Marker.ADD, + type=Marker.ARROW, + scale=Vector3(0.2, 0.5, 0), + points=np.array([Point(0, 0, 0), + res.pose.pose.position])) + marker.id = 3 + marker.header.frame_id = '/base_link' + marker.color.r = 1 + marker.color.g = 0 + marker.color.a = 1 + markers.markers.append(marker) + if self.targets[target].position is not None: + print("TARGET IS NOT NONE") + info += target + ' ' + yield self.nh.sleep(0.1) # Throttle service calls + self.print_info(info) + + @util.cancellableInlineCallbacks + def pattern(self): + self.print_info('Descending to Depth...') + #yield self.move.depth(1.5).go(blind=self.BLIND, speed=0.1) + yield self.move.left(1).go(blind=self.BLIND, speed=0.5) + yield self.nh.sleep(2) + yield self.move.right(2).go(blind=self.BLIND, speed=0.5) + yield self.nh.sleep(2) + yield self.move.left(1).go(blind=self.BLIND, speed=0.5) + yield self.nh.sleep(2) + yield self.move.down(0.5).go(blind=self.BLIND,speed=0.5) + yield self.nh.sleep(2) + #def err(): + #self.print_info('Search pattern canceled') + + #self.pattern_done = False + #for i, move in enumerate(self.moves[self.move_index:]): + #move = self.move.relative(np.array(move)).go(blind=self.BLIND, speed=0.1) + #yield self.nh.sleep(2) + #move.addErrback(err) + #yield move + #self.move_index = i + 1 + #self.print_bad('Pattern finished. 
Firing at any locked targets.') + #self.pattern_done = True + + @util.cancellableInlineCallbacks + def fire(self, target): + self.print_info("FIRING {}".format(target)) + target_pose = self.targets[target].position + yield self.move.go(blind=self.BLIND, speed=0.1) # Station hold + transform = yield self._tf_listener.get_transform('/map', '/base_link') + # target_position = transform._q_mat.dot( + # target_pose - transform._p) + + sub_pos = yield self.tx_pose() + print('Current Sub Position: ', sub_pos) + + #sub_pos = transform._q_mat.dot( + # (sub_pos[0]) - transform._p) + #target_position = sub_pos[0] - target_pose + print('Moving to Target Position: ', target_pose) + target_position = target_pose + self.normal + # yield self.move.look_at_without_pitching(target_position).go( + # blind=self.BLIND, speed=.25) + print('Target normal: ', self.normal) + print('Point: ', target_position) + yield self.move.set_position(np.array([target_position[0], target_position[1], target_position[2]])).go(blind=True, speed=.5) + + self.print_good( + "{} locked. Firing torpedos. Hit confirmed, good job Commander.". + format(target)) + sub_pos = yield self.tx_pose() + print('Current Sub Position: ', sub_pos) + yield self.actuators.shoot_torpedo1() + yield self.actuators.shoot_torpedo2() + self.done = True + + def get_target(self): + target = 'BCO' + ''' + Returns the target we are going to focus on. Loop through priorities. + Highest priority is the TCX target, followed by the TRX and TLX. + Lowest priority is the BCO target. Targets are ordered in the list + already, so this will take the first target it can actually find. + Currently this will always by the BCO target. This is because we don't + differentiate between targets currently as we cannot unblock the + blocked ones. This means there is only one target for us to lock onto. 
+ ''' + if self.targets[target].destroyed: + pass + # temp = target + elif self.targets[target].position is not None: + return target + elif self.pattern_done and self.targets[target].position is not None: + return target + else: + return None + + @util.cancellableInlineCallbacks + def run(self, args): + # start_time = yield self.nh.get_time() # Store time mission + # starts for timeout + + self.print_info("Enabling Perception") + self.print_info("{}, Ree".format(self.vision_proxies.xyz_points)) + self.vision_proxies.xyz_points.start() + self.generate_pattern() + pattern = self.pattern() + self.do_search = True + search = self.search() + while not self.done: + t = self.get_target() + if t is not None: + pattern.cancel() + yield self.fire(t) + self.targets[t].set_destroyed() + if not self.done: + pattern = self.pattern() + elif self.pattern_done: + break + yield self.nh.sleep(0.1) + search.cancel() + pattern.cancel() + self.vision_proxies.xyz_points.stop() + self.print_good('Done!') + diff --git a/command/sub8_missions/sub8_missions/dracula_grab.py b/command/sub8_missions/sub8_missions/dracula_grab.py new file mode 100644 index 00000000..5074869f --- /dev/null +++ b/command/sub8_missions/sub8_missions/dracula_grab.py @@ -0,0 +1,89 @@ +from .sub_singleton import SubjuGator +from txros import util +import numpy as np +from sensor_msgs.msg import CameraInfo +from image_geometry import PinholeCameraModel +import mil_ros_tools +from twisted.internet import defer +from geometry_msgs.msg import Point +from mil_misc_tools import text_effects +from std_srvs.srv import SetBool, SetBoolRequest + +from sub8_msgs.srv import GuessRequest, GuessRequestRequest + +import mil_ros_tools + +fprint = text_effects.FprintFactory(title="DRACULA_GRAB", msg_color="cyan").fprint + +SPEED = 0.25 +FAST_SPEED = 1 + +SEARCH_HEIGHT = 1.5 +HEIGHT_DRACULA_GRABBER = 0.5 +TRAVEL_DEPTH = 0.5 # 2 + + +class DraculaGrabber(SubjuGator): + + @util.cancellableInlineCallbacks + def run(self, args): + 
fprint('Enabling cam_ray publisher') + + yield self.nh.sleep(1) + + fprint('Connecting camera') + + cam_info_sub = yield self.nh.subscribe( + '/camera/down/camera_info', + CameraInfo) + + fprint('Obtaining cam info message') + cam_info = yield cam_info_sub.get_next_message() + cam_center = np.array([cam_info.width/2, cam_info.height/2]) + cam_norm = np.sqrt(cam_center[0]**2 + cam_center[1]**2) + fprint('Cam center: {}'.format(cam_center)) + + model = PinholeCameraModel() + model.fromCameraInfo(cam_info) + + # enable_service = self.nh.get_service_client("/vamp/enable", SetBool) + # yield enable_service(SetBoolRequest(data=True)) + + try: + vamp_txros = yield self.nh.get_service_client('/guess_location', + GuessRequest) + dracula_req = yield vamp_txros(GuessRequestRequest(item='dracula')) + if dracula_req.found is False: + use_prediction = False + fprint( + 'Forgot to add dracula to guess?', + msg_color='yellow') + else: + fprint('Found dracula.', msg_color='green') + yield self.move.set_position(mil_ros_tools.rosmsg_to_numpy(dracula_req.location.pose.position)).depth(TRAVEL_DEPTH).go(speed=FAST_SPEED) + except Exception as e: + fprint(str(e) + 'Forgot to run guess server?', msg_color='yellow') + + dracula_sub = yield self.nh.subscribe('/bbox_pub', Point) + yield self.move.to_height(SEARCH_HEIGHT).zero_roll_and_pitch().go(speed=SPEED) + + while True: + fprint('Getting location of ball drop...') + dracula_msg = yield dracula_sub.get_next_message() + dracula_xy = mil_ros_tools.rosmsg_to_numpy(dracula_msg)[:2] + vec = dracula_xy - cam_center + fprint("Vec: {}".format(vec)) + vec = vec / cam_norm + vec[1] = -vec[1] + fprint("Rel move vec {}".format(vec)) + if np.allclose(vec, np.asarray(0), atol=50): + break + vec = np.append(vec, 0) + + yield self.move.relative_depth(vec).go(speed=SPEED) + + fprint('Centered, going to depth {}'.format(HEIGHT_DRACULA_GRABBER)) + yield self.move.to_height(HEIGHT_DRACULA_GRABBER).zero_roll_and_pitch().go(speed=SPEED) + fprint('Dropping 
marker') + yield self.actuators.gripper_close() + diff --git a/command/sub8_missions/sub8_missions/vampire_slayer.py b/command/sub8_missions/sub8_missions/vampire_slayer.py index 5a87e554..b4c61645 100644 --- a/command/sub8_missions/sub8_missions/vampire_slayer.py +++ b/command/sub8_missions/sub8_missions/vampire_slayer.py @@ -10,6 +10,9 @@ from visualization_msgs.msg import Marker from mil_misc_tools import text_effects from std_srvs.srv import SetBool, SetBoolRequest +from visualization_msgs.msg import Marker, MarkerArray +from mil_misc_tools import FprintFactory +from mil_ros_tools import rosmsg_to_numpy from sub8_msgs.srv import GuessRequest, GuessRequestRequest @@ -21,11 +24,15 @@ SPEED = 0.25 - +X_OFFSET = 0 +Y_OFFSET = 0 +Z_OFFSET = 0 class VampireSlayer(SubjuGator): @util.cancellableInlineCallbacks def run(self, args): + self.vision_proxies.xyz_points.start() + global SPEED global pub_cam_ray fprint('Enabling cam_ray publisher') @@ -47,87 +54,100 @@ def run(self, args): # enable_service = self.nh.get_service_client("/vamp/enable", SetBool) # yield enable_service(SetBoolRequest(data=True)) - try: - vamp_txros = yield self.nh.get_service_client('/guess_location', - GuessRequest) - vamp_req = yield vamp_txros(GuessRequestRequest(item='vampire_slayer')) - if vamp_req.found is False: - use_prediction = False - fprint( - 'Forgot to add vampires to guess?', - msg_color='yellow') - else: - fprint('Found vamires.', msg_color='green') - yield self.move.set_position(mil_ros_tools.rosmsg_to_numpy(vamp_req.location.pose.position)).depth(0.5).go(speed=0.4) - except Exception as e: - fprint(e) - - dice_sub = yield self.nh.subscribe('/bbox_pub', Point) - - found = {} - history_tf = {} - while len(found) != 1: - fprint('Getting target vampire xy') - dice_xy = yield dice_sub.get_next_message() - found[dice_xy.z] = mil_ros_tools.rosmsg_to_numpy(dice_xy)[:2] - fprint(found) - out = yield self.get_transform(model, found[dice_xy.z]) - history_tf[dice_xy.z] = out - - # yield 
enable_service(SetBoolRequest(data=False)) - - start = self.move.zero_roll_and_pitch() - yield start.go() - - for i in range(1): - fprint('Hitting Vampire {}'.format(i)) - # Get one of the dice - dice, xy = found.popitem() - fprint('Vampire: {}'.format(dice)) - ray, base = history_tf[dice] - - where = base + 3 * ray - - fprint(where) - fprint('Moving!', msg_color='yellow') - fprint('Current position: {}'.format(self.pose.position)) - fprint('zrp') - yield self.move.zero_roll_and_pitch().go(blind=True) - yield self.nh.sleep(4) - fprint('hitting', msg_color='yellow') - yield self.move.look_at(where).go(blind=True, speed=SPEED) - yield self.nh.sleep(4) - yield self.move.set_position(where).go(blind=True, speed=SPEED) - yield self.nh.sleep(4) - fprint('going back', msg_color='yellow') - yield start.go(blind=True, speed=SPEED) - yield self.nh.sleep(4) - - @util.cancellableInlineCallbacks - def get_transform(self, model, point): - fprint('Projecting to 3d ray') - ray = np.array(model.projectPixelTo3dRay(point)) - fprint("ddray {}".format(ray)) - fprint('Transform') - transform = yield self._tf_listener.get_transform('/map', 'front_left_cam_optical') - ray = transform._q_mat.dot(ray) - # ray = ray / np.linalg.norm(ray) - marker = Marker( - ns='vamp', - action=visualization_msgs.Marker.ADD, - type=Marker.ARROW, - scale=Vector3(0.2, 0.2, 2), - points=np.array([ - Point(transform._p[0], transform._p[1], transform._p[2]), - Point(transform._p[0] + ray[0], transform._p[1] + ray[1], - transform._p[2] + ray[2]), - ])) - marker.header.frame_id = '/map' - marker.color.r = 0 - marker.color.g = 1 - marker.color.b = 0 - marker.color.a = 1 - global pub_cam_ray - pub_cam_ray.publish(marker) - fprint('ray: {}, origin: {}'.format(ray, transform._p)) - defer.returnValue((ray, transform._p)) +# try: +# vamp_txros = yield self.nh.get_service_client('/guess_location', +# GuessRequest) +# vamp_req = yield vamp_txros(GuessRequestRequest(item='vampire_slayer')) +# if vamp_req.found is False: +# 
use_prediction = False +# fprint( +# 'Forgot to add vampires to guess?', +# msg_color='yellow') +# else: +# fprint('Found vampires.', msg_color='green') +# yield self.move.set_position(mil_ros_tools.rosmsg_to_numpy(vamp_req.location.pose.position)).depth(0.5).go(speed=0.4) +# except Exception as e: +# fprint(e) + + markers = MarkerArray() + pub_markers = yield self.nh.advertise('/torpedo/rays', MarkerArray) + pub_markers.publish(markers) + ''' + Move Pattern + ''' + yield self.move.left(1).go() + yield self.nh.sleep(2) + yield self.move.right(2).go() + yield self.nh.sleep(2) + yield self.move.down(.5).go() + yield self.nh.sleep(2) + yield self.move.up(.5).go() + yield self.move.forward(.75).go() + yield self.nh.sleep(2) + yield self.move.right(.75).go() + yield self.nh.sleep(2) + yield self.move.backward(.75).go() + yield self.move.left(1).go() + ''' + Did we find something? + ''' + res = yield self.vision_proxies.xyz_points.get_pose( + target='buoy') + MISSION = 'Vampires' + print_info = FprintFactory(title=MISSION).fprint + + if res.found: + print_info("CHARGING BUOY") + target_pose = rosmsg_to_numpy(res.pose.pose.position) + target_normal = rosmsg_to_numpy(res.pose.pose.orientation)[:2] + print('Normal: ', target_normal) + yield self.move.go(blind=True, speed=0.1) # Station hold + transform = yield self._tf_listener.get_transform('/map', '/base_link') + target_position = target_pose +# target_position = target_pose / target_normal + + + + sub_pos = yield self.tx_pose() + print('Current Sub Position: ', sub_pos) + + print('Map Position: ', target_position) + #sub_pos = transform._q_mat.dot(sub_pos[0] - transform._p) + #target_position = target_position - sub_pos[0] + # yield self.move.look_at_without_pitching(target_position).go(blind=True, speed=.25) + #yield self.move.relative(np.array([0, target_position[1], 0])).go(blind=True, speed=.1) + # Don't hit buoy yet + print("MOVING TO X: ", target_position[0]) + print("MOVING TO Y: ", target_position[1]) + yield 
self.move.set_position(np.array([target_position[0], target_position[1], target_position[2]])).go( + blind=True, speed=.1) + # Go behind it + #print('Going behind target') + #yield self.move.right(4).go(speed=1) + #yield self.move.forward(4).go(speed=1) + #yield self.move.left(4).go(speed=1) + # Hit buoy + #print('Hitting Target') + #yield self.move.strafe_backward(Y_OFFSET).go(speed=1) + print_info( + "Slaying the Vampire, good job Inquisitor.") + sub_pos = yield self.tx_pose() + print('Current Sub Position: ', sub_pos) + marker = Marker( + ns='buoy', + action=visualization_msgs.Marker.ADD, + type=Marker.ARROW, + scale=Vector3(0.2, 0.5, 0), + points=np.array([Point(0, 0, 0), + res.pose.pose.position])) + marker.id = 3 + marker.header.frame_id = '/base_link' + marker.color.r = 1 + marker.color.g = 0 + marker.color.a = 1 + markers.markers.append(marker) + pub_markers.publish(markers) + + yield self.nh.sleep(0.5) # Throttle service calls + # print_info(info) + self.vision_proxies.xyz_points.stop() diff --git a/perception/sub8_perception/CMakeLists.txt b/perception/sub8_perception/CMakeLists.txt index 7a31eb27..b0d9ef0f 100644 --- a/perception/sub8_perception/CMakeLists.txt +++ b/perception/sub8_perception/CMakeLists.txt @@ -7,6 +7,7 @@ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp -g -rdynamic -Wall -std=c++11 " find_package(catkin REQUIRED COMPONENTS roscpp + dynamic_reconfigure rospy eigen_conversions rostime @@ -25,6 +26,10 @@ find_package(catkin catkin_python_setup() +generate_dynamic_reconfigure_options( + cfg/VampireIdentifier.cfg +) + catkin_package( INCLUDE_DIRS include diff --git a/perception/sub8_perception/cfg/VampireIdentifier.cfg b/perception/sub8_perception/cfg/VampireIdentifier.cfg new file mode 100755 index 00000000..0fb41aa5 --- /dev/null +++ b/perception/sub8_perception/cfg/VampireIdentifier.cfg @@ -0,0 +1,14 @@ +#! 
/usr/bin/env python +PACKAGE = 'sub8_perception' +from dynamic_reconfigure.parameter_generator_catkin import * +gen = ParameterGenerator() +# using strings because there seems to be no vector support (currently) +gen.add('target', str_t, 0, 'Target Vampire', 'drac') +gen.add('override', bool_t, 0, 'Override Vampire Default Values?', False) +gen.add('dyn_lower', str_t, 0, 'Lower RGB Threshold', '0,0,80') +gen.add('dyn_upper', str_t, 0, 'Upper RGB Threshold', '200, 200, 250') +gen.add('min_trans', double_t, 0, 'Minimum Transformation for Observation (meters)', .05, 0, 2) +gen.add('max_velocity', int_t, 0, 'Maximum Velocity for Observation', 1) +gen.add('timeout', int_t, 0, 'Maximum time until an Observation expires', 25000, 0, 50000) +gen.add('min_obs', int_t, 0, 'Minimum Observations before Point Estimation', 10, 4, 16) +exit(gen.generate(PACKAGE, PACKAGE, 'VampireIdentifier')) diff --git a/perception/sub8_perception/nodes/observation_pub.py b/perception/sub8_perception/nodes/observation_pub.py new file mode 100755 index 00000000..fd525c5c --- /dev/null +++ b/perception/sub8_perception/nodes/observation_pub.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python +from __future__ import print_function +import sys +import tf +import mil_ros_tools +import cv2 +import numpy as np +import rospy +from std_msgs.msg import Header +from collections import deque +from sensor_msgs.msg import Image, RegionOfInterest +from cv_bridge import CvBridgeError +from sub8_vision_tools import MultiObservation +from image_geometry import PinholeCameraModel +from std_srvs.srv import SetBool, SetBoolResponse +from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion +from sub8_msgs.srv import VisionRequest, VisionRequestResponse +from mil_ros_tools import Image_Subscriber, Image_Publisher +from cv_bridge import CvBridge +from mil_misc_tools import FprintFactory +from visualization_msgs.msg import Marker + + +''' +Perception component of the Torpedo Board Challenge. 
Utilizes code from +the pyimagesearch blog post on color thresholding and shape detection +as well as code from the buoy_finder mission of previous years. +''' +MISSION='PERCEPTION' + + +class MultiObs: + + def __init__(self): + + # Pull constants from config file + self.min_trans = rospy.get_param('~min_trans', .25) + self.max_velocity = rospy.get_param('~max_velocity', 1) + self.min_observations = rospy.get_param('~min_observations', 30) + self.camera = rospy.get_param('~camera_topic', + '/camera/front/left/image_rect_color') + # Instantiate remaining variables and objects + self._observations = deque() + self._pose_pairs = deque() + self._times = deque() + self._observations1 = deque() + self._pose_pairs1 = deque() + self._times1 = deque() + self._observations2 = deque() + self._pose_pairs2 = deque() + self._times2 = deque() + self.last_image_time = None + self.last_image = None + self.tf_listener = tf.TransformListener() + self.status = '' + self.est = None + self.est1 = None + self.est2 = None + self.plane = None + self.visual_id = 0 + self.enabled = False + self.bridge = CvBridge() + self.print_info = FprintFactory(title=MISSION).fprint + # Image Subscriber and Camera Information + self.point_sub = rospy.Subscriber( + '/roi_pub', RegionOfInterest, self.acquire_targets) + self.image_sub = Image_Subscriber(self.camera, self.image_cb) + + self.camera_info = self.image_sub.wait_for_camera_info() + self.camera_model = PinholeCameraModel() + self.camera_model.fromCameraInfo(self.camera_info) + self.frame_id = 'front_left_cam_optical' + self.markerPub0 = rospy.Publisher('estMarker0', Marker, queue_size=1) + self.markerPub1 = rospy.Publisher('estMarker1', Marker, queue_size=1) + self.markerPub2 = rospy.Publisher('estMarker2', Marker, queue_size=1) + # Ros Services so mission can be toggled and info requested + rospy.Service('~enable', SetBool, self.toggle_search) + self.multi_obs = MultiObservation(self.camera_model) + rospy.Service('~pose', VisionRequest, 
self.request_board3d) + + # Debug + self.debug = rospy.get_param('~debug', True) + + def image_cb(self, image): + ''' + Run each time an image comes in from ROS. If enabled, + attempt to find the torpedo board. + ''' + return None + + def toggle_search(self, srv): + ''' + Callback for standard ~enable service. If true, start + looking at frames for buoys. + ''' + if srv.data: + rospy.loginfo("TARGET ACQUISITION: enabled") + self.enabled = True + + else: + rospy.loginfo("TARGET ACQUISITION: disabled") + self.enabled = False + + return SetBoolResponse(success=True) + + def request_board3d(self, srv): + ''' + Callback for 3D vision request. Uses recent observations of target + board specified in target_name to attempt a least-squares position + estimate. Ignoring orientation of board. + ''' + if not self.enabled: + print("REEEE") + return VisionRequestResponse(found=False) + #buoy = self.buoys[srv.target_name] + if self.est is None: + self.print_info("NO ESTIMATE!") + return VisionRequestResponse(found=False) + # NOTE: returns normal vec encoded into a quaternion message (so not actually a quaternion) + return VisionRequestResponse( + pose=PoseStamped( + header=Header(stamp=rospy.Time.now(), frame_id='/map'), + pose=Pose(position=Point(*((self.est + self.est1 + self.est2) / 3)), + orientation=Quaternion(x=self.plane[0], y=self.plane[1], z=self.plane[2]))), + found=True) + + def clear_old_observations(self): + # Observations older than three seconds are discarded. 
+ # print(self._observations) + # print(self._observations1) + # print(self._observations2) + return + + def size(self): + return len(self._observations) + + def add_observation(self, obs, pose_pair, time): + if len(self._pose_pairs) == 0 or np.linalg.norm( + self._pose_pairs[-1][0] - pose_pair[0]) > self.min_trans: + self._observations.append(obs) + self._pose_pairs.append(pose_pair) + self._times.append(time) + + def add_observation1(self, obs, pose_pair, time): + if len(self._pose_pairs1) == 0 or np.linalg.norm( + self._pose_pairs1[-1][0] - pose_pair[0]) > self.min_trans: + self._observations1.append(obs) + self._pose_pairs1.append(pose_pair) + self._times1.append(time) + return (self._observations1, self._pose_pairs1) + + def add_observation2(self, obs, pose_pair, time): + self.clear_old_observations() + if len(self._pose_pairs2) == 0 or np.linalg.norm( + self._pose_pairs2[-1][0] - pose_pair[0]) > self.min_trans: + self._observations2.append(obs) + self._pose_pairs2.append(pose_pair) + self._times2.append(time) + return (self._observations2, self._pose_pairs2) + + def get_observations_and_pose_pairs(self): + self.clear_old_observations() + return (self._observations, self._pose_pairs) + + def generate_plane(self): + ab = self.est - self.est1 + ac = self.est - self.est2 + # print("AB: ", ab) + # print("AC: ", ac) + x = np.cross(ab,ac) + return x / np.linalg.norm(x) + + def marker_msg(self, point, point_name): + robotMarker = Marker() + robotMarker.header.frame_id = "/map" + robotMarker.header.stamp = rospy.get_rostime() + robotMarker.ns = point_name + robotMarker.id = 0 + robotMarker.type = 2 # sphere + robotMarker.action = 0 + robotMarker.pose.position = Point(point[0], point[1], point[2]) + robotMarker.pose.orientation.x = 0 + robotMarker.pose.orientation.y = 0 + robotMarker.pose.orientation.z = 0 + robotMarker.pose.orientation.w = 1.0 + robotMarker.scale.x = .5 + robotMarker.scale.y = .5 + robotMarker.scale.z = .5 + + robotMarker.color.r = 1.0 + 
robotMarker.color.g = 1.0 + robotMarker.color.b = 0.0 + robotMarker.color.a = 1.0 + + robotMarker.lifetime = rospy.Duration(0) + return robotMarker + + def acquire_targets(self, roi): + if not self.enabled: + return + # NOTE: point.z contains the timestamp of the image when it was processed in the neural net. + x0 = roi.x_offset + y0 = roi.y_offset + height = roi.height + width = roi.width + + point0 = np.array([x0, y0]) + point1 = np.array([x0+width, y0]) + point2 = np.array([x0, y0+height]) + point3 = np.array([x0+width, y0+height]) + # print("p1: ", point1) + # print("p2: ", point2) + try: + self.tf_listener.waitForTransform('/map', + '/front_left_cam_optical', + self.image_sub.last_image_time - rospy.Duration(.2), + rospy.Duration(0.2)) + except tf.Exception as e: + rospy.logwarn( + "Could not transform camera to map: {}".format(e)) + return False + + (t, rot_q) = self.tf_listener.lookupTransform( + '/map', '/front_left_cam_optical', self.image_sub.last_image_time - rospy.Duration(.2)) + R = mil_ros_tools.geometry_helpers.quaternion_matrix(rot_q) + + self.add_observation(point0, (np.array(t), R), + self.image_sub.last_image_time) + observations1, pose_pairs1 = self.add_observation1(point1, (np.array(t), R), + self.image_sub.last_image_time) + observations2, pose_pairs2 = self.add_observation2(point2, (np.array(t), R), + self.image_sub.last_image_time) + + observations, pose_pairs = self.get_observations_and_pose_pairs() + if len(observations) > self.min_observations: + self.est = self.multi_obs.lst_sqr_intersection( + observations, pose_pairs) + self.est1 = self.multi_obs.lst_sqr_intersection( + observations1, pose_pairs1) + self.est2 = self.multi_obs.lst_sqr_intersection( + observations2, pose_pairs2) + # print('est1: ', self.est1) + # print('est2: ', self.est2) + self.status = 'Pose found' + self.print_info("Pose Found!") + self.plane = self.generate_plane() + # print("Plane: ", self.plane) + self.markerPub0.publish(self.marker_msg(self.est, 'est0')) + 
self.markerPub1.publish(self.marker_msg(self.est1, 'est1')) + self.markerPub2.publish(self.marker_msg(self.est2, 'est2')) + print((self.est + self.est1 + self.est2) / 3) + + else: + self.status = '{} observations'.format(len(observations)) + + +def main(args): + rospy.init_node('xyz_points', anonymous=False) + MultiObs() + + try: + rospy.spin() + except KeyboardInterrupt: + print("Shutting down") + cv2.destroyAllWindows() + + +if __name__ == '__main__': + main(sys.argv) diff --git a/perception/sub8_perception/nodes/vampire_identifier.py b/perception/sub8_perception/nodes/vampire_identifier.py new file mode 100755 index 00000000..dedba01f --- /dev/null +++ b/perception/sub8_perception/nodes/vampire_identifier.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python +from __future__ import print_function +import sys +import tf +import mil_ros_tools +import cv2 +import numpy as np +import rospy +from sub8_perception.cfg import VampireIdentifierConfig +from std_msgs.msg import Header +from collections import deque +from sensor_msgs.msg import Image +from cv_bridge import CvBridgeError +from sub8_vision_tools import MultiObservation +from image_geometry import PinholeCameraModel +from std_srvs.srv import SetBool, SetBoolResponse +from geometry_msgs.msg import PoseStamped, Pose, Point +from sub8_msgs.srv import VisionRequest, VisionRequestResponse +from mil_ros_tools import Image_Subscriber, Image_Publisher +from cv_bridge import CvBridge +from dynamic_reconfigure.server import Server as DynamicReconfigureServer + +''' +This Vampiric Grymoire identifies the four major types of Vampire: + Jiangshi --> Flat "Buoy" + Draugr --> 3 Sided Buoy + Aswang --> 3 Sided Buoy + Vetalas --> 3 Sided Buoy + +General outline of task + Find 1 Sided Buoy with Sonar. + Boop the Buoy. + Find 3 Sided Buoy with Sonar. + Find Declared Enemy based on config. + Boop the Declared Enemy. + Flee scene of crime before other vampires get to us. 
+''' + + +class VampireIdentifier: + + def __init__(self): + + # Pull constants from config file + self.override = False + self.lower = [0, 0, 0] + self.upper = [0, 0, 0] + self.min_trans = 0 + self.max_velocity = 0 + self.timeout = 0 + self.min_observations = 0 + self.camera = rospy.get_param('~camera_topic', + '/camera/down/image_rect_color') + self.goal = None + self.last_config = None + self.reconfigure_server = DynamicReconfigureServer(VampireIdentifierConfig, self.reconfigure) + + # Instantiate remaining variables and objects + self._observations = deque() + self._pose_pairs = deque() + self._times = deque() + self.last_image_time = None + self.last_image = None + self.tf_listener = tf.TransformListener() + self.status = '' + self.est = None + self.visual_id = 0 + self.enabled = False + self.bridge = CvBridge() + + # Image Subscriber and Camera Information + + self.image_sub = Image_Subscriber(self.camera, self.image_cb) + self.camera_info = self.image_sub.wait_for_camera_info() + + self.camera_info = self.image_sub.wait_for_camera_info() + self.camera_model = PinholeCameraModel() + self.camera_model.fromCameraInfo(self.camera_info) + self.frame_id = self.camera_model.tfFrame() + + # Ros Services so mission can be toggled and info requested + rospy.Service('~enable', SetBool, self.toggle_search) + self.multi_obs = MultiObservation(self.camera_model) + rospy.Service('~pose', VisionRequest, self.request_buoy) + self.image_pub = Image_Publisher("drac_vision/debug") + self.point_pub = rospy.Publisher( + "drac_vision/points", Point, queue_size=1) + self.mask_image_pub = rospy.Publisher( + 'drac_vision/mask', Image, queue_size=1) + + # Debug + self.debug = rospy.get_param('~debug', True) + + @staticmethod + def parse_string(threshes): + ret = [float(thresh.strip()) for thresh in threshes.split(',')] + if len(ret) != 3: + raise ValueError('not 3') + return ret + + def reconfigure(self, config, level): + try: + self.override = config['override'] + self.goal = 
config['target'] + self.lower = self.parse_string(config['dyn_lower']) + self.upper = self.parse_string(config['dyn_upper']) + self.min_trans = config['min_trans'] + self.max_velocity = config['max_velocity'] + self.timeout = config['timeout'] + self.min_observations = config['min_obs'] + + except ValueError as e: + rospy.logwarn('Invalid dynamic reconfigure: {}'.format(e)) + return self.last_config + + if self.override: + # Dynamic Values for testing + self.lower = np.array(self.lower) + self.upper = np.array(self.upper) + else: + # Hard Set for use in Competition + if self.goal == 'drac': + self.lower = rospy.get_param('~dracula_low_thresh', [0, 0, 80]) + self.upper = rospy.get_param('~dracula_high_thresh', [0, 0, 80]) + else: + raise ValueError('Invalid Target Name') + self.last_config = config + rospy.loginfo('Params succesfully updated via dynamic reconfigure') + return config + + def image_cb(self, image): + ''' + Run each time an image comes in from ROS. + ''' + if not self.enabled: + return + + self.last_image = image + + if self.last_image_time is not None and \ + self.image_sub.last_image_time < self.last_image_time: + # Clear tf buffer if time went backwards (nice for playing bags in + # loop) + self.tf_listener.clear() + + self.last_image_time = self.image_sub.last_image_time + self.acquire_targets(image) + + def toggle_search(self, srv): + ''' + Callback for standard ~enable service. If true, start + looking at frames for buoys. + ''' + if srv.data: + rospy.loginfo("TARGET ACQUISITION: enabled") + self.enabled = True + + else: + rospy.loginfo("TARGET ACQUISITION: disabled") + self.enabled = False + + return SetBoolResponse(success=True) + + def request_buoy(self, srv): + ''' + Callback for 3D vision request. Uses recent observations of target + board specified in target_name to attempt a least-squares position + estimate. Ignoring orientation of board. 
+ ''' + if not self.enabled: + return VisionRequestResponse(found=False) + # buoy = self.buoys[srv.target_name] + if self.est is None: + return VisionRequestResponse(found=False) + return VisionRequestResponse( + pose=PoseStamped( + header=Header(stamp=self.last_image_time, frame_id='/map'), + pose=Pose(position=Point(*self.est))), + found=True) + + def clear_old_observations(self): + ''' + Observations older than two seconds are discarded. + ''' + time = rospy.Time.now() + i = 0 + while i < len(self._times): + if time - self._times[i] > self.timeout: + self._times.popleft() + self._observations.popleft() + self._pose_pairs.popleft() + else: + i += 1 + # print('Clearing') + + def add_observation(self, obs, pose_pair, time): + ''' + Add a new observation associated with an object + ''' + + self.clear_old_observations() + # print('Adding...') + if slen(self._observations) == 0 or np.linalg.norm( + self._pose_pairs[-1][0] - pose_pair[0]) > self.min_trans: + self._observations.append(obs) + self._pose_pairs.append(pose_pair) + self._times.append(time) + + def get_observations_and_pose_pairs(self): + ''' + Fetch all recent observations + clear old ones + ''' + + self.clear_old_observations() + return (self._observations, self._pose_pairs) + + def detect(self, c): + ''' + Verify the shape in the masked image is large enough to be a valid target. + This changes depending on target Vampire, as does the number of targets we want. 
+ ''' + target = "unidentified" + peri = cv2.arcLength(c, True) + + if peri < self.min_contour_area or peri > self.max_contour_area: + return target + approx = cv2.approxPolyDP(c, 0.04 * peri, True) + + target = "Target Aquisition Successful" + + return target + + def mask_image(self, cv_image, lower, upper): + mask = cv2.inRange(cv_image, lower, upper) + # Remove anything not within the bounds of our mask + output = cv2.bitwise_and(cv_image, cv_image, mask=mask) + print('ree') + + if (self.debug): + try: + # print(output) + self.mask_image_pub.publish( + self.bridge.cv2_to_imgmsg(np.array(output), 'bgr8')) + except CvBridgeError as e: + print(e) + + return output + + def acquire_targets(self, cv_image): + # Take in the data and get its dimensions. + height, width, channels = cv_image.shape + + # create NumPy arrays from the boundaries + lower = np.array(self.lower, dtype="uint8") + upper = np.array(self.upper, dtype="uint8") + + # Generate a mask based on the constants. + blurred = self.mask_image(cv_image, lower, upper) + blurred = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY) + # Compute contours + cnts = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL, + cv2.CHAIN_APPROX_SIMPLE) + + cnts = cnts[1] + ''' + We use OpenCV to compute our contours and then begin processing them + to ensure we are identifying a proper target. 
+ ''' + + shape = '' + peri_max = 0 + max_x = 0 + max_y = 0 + m_shape = '' + for c in cnts: + # compute the center of the contour, then detect the name of the + # shape using only the contour + M = cv2.moments(c) + if M["m00"] == 0: + M["m00"] = .000001 + cX = int((M["m10"] / M["m00"])) + cY = int((M["m01"] / M["m00"])) + self.point_pub.publish(Point(x=cX, y=cY)) + shape = self.detect(c) + + # multiply the contour (x, y)-coordinates by the resize ratio, + # then draw the contours and the name of the shape on the image + + c = c.astype("float") + # c *= ratio + c = c.astype("int") + if shape == "Target Aquisition Successful": + if self.debug: + try: + cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2) + cv2.putText(cv_image, shape, (cX, cY), + cv2.FONT_HERSHEY_SIMPLEX, 0.5, + (255, 255, 255), 2) + self.image_pub.publish(cv_image) + except CvBridgeError as e: + print(e) + + + # Grab the largest contour. Generally this is a safe bet but... We may need to tweak this for the three different vampires. + peri = cv2.arcLength(c, True) + if peri > peri_max: + peri_max = peri + max_x = cX + max_y = cY + m_shape = shape + ''' + Approximate 3D coordinates. 
+ ''' + + if m_shape == "Target Aquisition Successful": + try: + self.tf_listener.waitForTransform('/map', + self.camera_model.tfFrame(), + self.last_image_time, + rospy.Duration(0.2)) + except tf.Exception as e: + rospy.logwarn( + "Could not transform camera to map: {}".format(e)) + return False + + (t, rot_q) = self.tf_listener.lookupTransform( + '/map', self.camera_model.tfFrame(), self.last_image_time) + R = mil_ros_tools.geometry_helpers.quaternion_matrix(rot_q) + center = np.array([max_x, max_y]) + self.add_observation(center, (np.array(t), R), + self.last_image_time) + + observations, pose_pairs = self.get_observations_and_pose_pairs() + if len(observations) > self.min_observations: + self.est = self.multi_obs.lst_sqr_intersection( + observations, pose_pairs) + self.status = 'Pose found' + + else: + self.status = '{} observations'.format(len(observations)) + + +def main(args): + rospy.init_node('vamp_ident', anonymous=False) + VampireIdentifier() + + try: + rospy.spin() + except KeyboardInterrupt: + print("Shutting down") + cv2.destroyAllWindows() + + +if __name__ == '__main__': + main(sys.argv)