diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..0c4e12d3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,79 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Binaries
+*.npz
+
+# PyCharm
+.idea/
+CMakeLists.txt
+
+# Other
+*.mtl
+*.obj
+!transdec.obj
+
+*.dic
+pedometry.txt
+.DS_Store
+# Autosave files from QtCreator
+*.autosave
+*~
+
+# Vim swaps
+*.swp
diff --git a/drivers/mil_passive_sonar/CMakeLists.txt b/drivers/mil_passive_sonar/CMakeLists.txt
new file mode 100644
index 00000000..94c4818b
--- /dev/null
+++ b/drivers/mil_passive_sonar/CMakeLists.txt
@@ -0,0 +1,19 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(mil_passive_sonar)
+
+find_package(catkin REQUIRED COMPONENTS
+  rospy
+)
+
+catkin_python_setup()
+
+catkin_package(
+#  INCLUDE_DIRS include
+#  LIBRARIES sub8_sonar
+#  CATKIN_DEPENDS rospy
+#  DEPENDS system_lib
+)
+
+include_directories(
+  ${catkin_INCLUDE_DIRS}
+)
diff --git a/drivers/mil_passive_sonar/README.md b/drivers/mil_passive_sonar/README.md
new file mode 100644
index 00000000..9fc82f1d
--- /dev/null
+++ b/drivers/mil_passive_sonar/README.md
@@ -0,0 +1,24 @@
+# Sonar Driver
+
+This script interfaces with Jake's sonar board and provides a ROS service that returns the location and time of emission of the last heard pinger pulse.
+
+## How to run and use
+To start the driver, run:
+
+    roslaunch mil_passive_sonar sonar.launch
+
+> Note: make sure that the port, baud rate, and hydrophone locations are loaded into the parameter server. See launch/test.launch for an example.
+
+In order to ask for hydrophone information:
+
+    rosservice call /sonar/get_pinger_pulse *double_tab*
+
+The service should respond with the x, y, and z of the last heard pinger
+pulse. Remember that this should be considered the tip of an arrow
+pointing in the direction of the pinger; the range is completely unreliable
+and varies wildly.
+
+## TODO
++ This package is not yet fully set up to use the paulboard. The interface to make this happen would be simple to implement.
++ This package should estimate the least-squares solution for the actual 3D position of the pinger.
++ Visualize both the individual heading vectors and the LS position estimate.
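For context, a minimal client for this service might look like the sketch below. The service name and the `sub8_msgs/Sonar` type come from the driver later in this diff; normalizing the response into a unit heading is only a suggestion, given the unreliable range:

```python
#!/usr/bin/env python
import numpy as np
import rospy
from sub8_msgs.srv import Sonar

rospy.init_node('pinger_heading_client')
rospy.wait_for_service('/sonar/get_pinger_pulse')
get_pulse = rospy.ServiceProxy('/sonar/get_pinger_pulse', Sonar)

resp = get_pulse()
heading = np.array([resp.x, resp.y, resp.z])
# Only the direction is trustworthy; normalize before using it.
heading /= np.linalg.norm(heading)
print 'pinger heading (unit vector):', heading
```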
diff --git a/drivers/mil_passive_sonar/launch/test.launch b/drivers/mil_passive_sonar/launch/test.launch
new file mode 100644
index 00000000..206be6b9
--- /dev/null
+++ b/drivers/mil_passive_sonar/launch/test.launch
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+    { hydro0: {x: 0, y: 0, z: 0},
+      hydro1: {x: 0, y: 25.4, z: 0},
+      hydro2: {x: -22, y: -12.7, z: 0},
+      hydro3: {x: 22, y: -12.7, z: 0}}
+
+
diff --git a/drivers/mil_passive_sonar/mil_passive_sonar/__init__.py b/drivers/mil_passive_sonar/mil_passive_sonar/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/drivers/mil_passive_sonar/mil_passive_sonar/sonar_driver.py b/drivers/mil_passive_sonar/mil_passive_sonar/sonar_driver.py
new file mode 100755
index 00000000..ddfb3f6a
--- /dev/null
+++ b/drivers/mil_passive_sonar/mil_passive_sonar/sonar_driver.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+from __future__ import division
+import numpy as np
+import numpy.linalg as la
+from scipy.signal import resample, periodogram
+
+from itertools import product
+
+import rospy
+import rosparam
+
+from sub8_msgs.srv import Sonar, SonarResponse
+from sub8_ros_tools import thread_lock, make_header
+from sub8_alarm import AlarmBroadcaster
+from multilateration import Multilaterator
+
+import threading
+import serial
+import binascii
+import struct
+import time
+import crc16
+import sys
+
+# temp
+import matplotlib
+matplotlib.use("WX")
+import matplotlib.patches as mpatches
+import matplotlib.pyplot as plt
+fig = plt.figure(0)
+fig2 = plt.figure(1)
+plt.ion()
+plt.show()
+
+lock = threading.Lock()
+
+
+class Sub8Sonar(object):
+    '''
+    Smart sensor that provides high level ROS code with the location of pinger pulses detected with
+    Jake Easterling's Hydrophone board.
+
+    TODO: Add a function to try and reconnect to the serial port if we lose connection.
+    TODO: Express pulse location in map frame
+    '''
+    def __init__(self, method, c, hydrophone_locations, port, baud=19200):
+        rospy.init_node("sonar")
+
+        alarm_broadcaster = AlarmBroadcaster()
+        self.disconnection_alarm = alarm_broadcaster.add_alarm(
+            name='sonar_disconnect',
+            action_required=True,
+            severity=0
+        )
+        self.packet_error_alarm = alarm_broadcaster.add_alarm(
+            name='sonar_packet_error',
+            action_required=False,
+            severity=2
+        )
+
+        try:
+            self.ser = serial.Serial(port=port, baudrate=baud, timeout=None)
+            self.ser.flushInput()
+        except Exception, e:
+            print "\x1b[31mSonar serial connection error:\n\t", e, "\x1b[0m"
+            return None
+
+        self.c = c
+        self.hydrophone_locations = []
+        for key in hydrophone_locations:
+            sensor_location = np.array([hydrophone_locations[key]['x'], hydrophone_locations[key]['y'], hydrophone_locations[key]['z']])
+            self.hydrophone_locations += [sensor_location]
+        self.multilaterator = Multilaterator(hydrophone_locations, self.c, method)  # speed of sound in mm/us
+        self.est_signal_freq_kHz = 0
+        self._freq_sum = 0
+        self._freq_samples = 0
+
+        rospy.Service('~/sonar/get_pinger_pulse', Sonar, self.get_pulse_from_raw_data)
+        print "\x1b[32mSub8 sonar driver initialized\x1b[0m"
+        rospy.spin()
+
+    @thread_lock(lock)
+    def get_pulse_from_timestamps(self, srv):
+        self.ser.flushInput()
+
+        try:
+            self.ser.write('A')
+            print "Sent timestamp request..."
+        except:  # Except only serial errors in the future.
+            self.disconnection_alarm.raise_alarm(
+                problem_description="Sonar board serial connection has been terminated."
+            )
+            return False
+        return self.multilaterator.getPulseLocation(self.timestamp_listener())
+
+    def timestamp_listener(self):
+        '''
+        Parse the response of the board.
+        '''
+        print "Listening..."
+
+        # We've found a packet; now dissect it.
+        response = self.ser.read(19)
+        # rospy.loginfo("Received: %s" % response)  # uncomment for debugging
+        if self.check_CRC(response):
+            delete_last_lines(0)  # Heard response!
+            # The checksum matches the data so split the response into each piece of data.
+            # For info on what these letters mean: https://docs.python.org/2/library/struct.html#format-characters
+            data = struct.unpack('>BffffH', response)
+            timestamps = [data[4], data[1], data[2], data[3]]
+            print "timestamps:", timestamps
+            return timestamps
+        else:
+            self.packet_error_alarm.raise_alarm(
+                problem_description="Sonar board checksum error.",
+                parameters={
+                    'fault_info': {'data': response}
+                }
+            )
+            return None
+
+    @thread_lock(lock)
+    def get_pulse_from_raw_data(self, srv):
+        # request signals from hydrophone board
+        self.ser.flushInput()
+        try:
+            self.ser.write('B')
+            print "Sent raw data request..."
+        except:  # Except only serial errors in the future.
+            self.disconnection_alarm.raise_alarm(
+                problem_description="Sonar board serial connection has been terminated."
+            )
+            return False
+        return self.multilaterator.getPulseLocation(self.raw_data_listener())
+
+    def raw_data_listener(self):
+        '''
+        Parse the response of the board.
+        '''
+        print "Listening..."
+
+        # prepare arrays for storing signals
+        signal_bias = 32767
+        waves_recorded = 3
+        samples_per_wave = 17.24
+        upsample_scale = 3
+        exp_recording_size = int(np.floor(samples_per_wave)) * waves_recorded * upsample_scale - 1
+        raw_signals = np.zeros([4, exp_recording_size], dtype=float)
+
+        try:
+            # read in start bit of packet
+            timeout = 0
+            start_bit = self.ser.read(1)
+            # If start bit not read, keep trying
+            while start_bit != '\xBB':
+                start_bit = self.ser.read(1)
+                timeout += 1
+                if timeout > 600:
+                    raise BufferError("Timeout: failed to read a start bit from sonar board")
+            for elem in np.nditer(raw_signals, op_flags=['readwrite']):
+                elem[...] = float(self.ser.read(5)) - signal_bias  # elem[...] writes to the current element in place
+        except BufferError as e:
+            print e
+
+        non_zero_end_idx = 57
+        up_factor = 8
+        sampling_freq = 430E3 * 0.8  # Hz
+        samp_period = 1E6 / sampling_freq  # microseconds
+        upsamp_step = samp_period / up_factor
+
+        # Set how much of each signal to actually use for processing
+        raw_signals = raw_signals[:, 0:non_zero_end_idx]
+
+        # upsample the signals for successful cross-correlation
+        upsampled_signals = [resample(x, up_factor*len(x)) for x in raw_signals]
+
+        # scale waves so they all have the same variance
+        equalized_signals = [x / np.std(x) for x in upsampled_signals]
+        raw_signals = [x / np.std(x) for x in raw_signals]
+        w0_upsamp, w1_upsamp, w2_upsamp, w3_upsamp = equalized_signals
+        t_up = np.arange(0, non_zero_end_idx*samp_period, upsamp_step)
+
+        zero_crossings = np.where(np.diff(np.sign(w0_upsamp)))[0]
+        num_crossings = len(zero_crossings)
+        if num_crossings % 2 == 0:  # we want an odd number of zero crossings
+            zero_crossings = zero_crossings[1:]
+            num_crossings -= 1
+        waves_between_first_and_last_crossing = (num_crossings - 1) / 2
+        time_between_first_and_last_crossing = t_up[zero_crossings[-1]] - t_up[zero_crossings[0]]
+        curr_signal_freq_kHz = 1E3 * waves_between_first_and_last_crossing / time_between_first_and_last_crossing  # kHz
+        self._freq_sum += curr_signal_freq_kHz
+        self._freq_samples += 1
+        self.est_signal_freq_kHz = self._freq_sum / self._freq_samples
+
+        # frequencies, spectrum = periodogram(w0_upsamp, sampling_freq * up_factor)
+        # signal_freq = frequencies[np.argmax(spectrum)]  # Hz
+        # print "fft source frequency:", signal_freq, "Hz"
+        print "current source frequency:", curr_signal_freq_kHz, "kHz"
+        print "est source frequency:", self.est_signal_freq_kHz, "kHz"
+        # ax = fig2.add_subplot(111)
+        # ax.semilogy(frequencies, spectrum)
+        # ax.set_ylim([1e-7, 1e2])
+        # ax.set_xlim([0, 6e4])
+        # ax.set_xlabel('frequency [Hz]')
+        signal_period = 1E3 / self.est_signal_freq_kHz  # microseconds
+        upsamples_recorded = len(w0_upsamp)
+
+        waves_ref = 3.5
+        waves_non_ref = 2.5
+        samples_per_wave = signal_period / samp_period
+        ref_upsamples = int(round(waves_ref * samples_per_wave * up_factor))
+        nonref_upsamples = int(np.ceil(waves_non_ref * samples_per_wave * up_factor))
+
+        ref_start_idx = None
+        if len(w0_upsamp) % 2 == ref_upsamples % 2:
+            ref_start_idx = int(round((upsamples_recorded / 2) - (ref_upsamples / 2)))
+        else:
+            ref_start_idx = int(np.ceil((upsamples_recorded / 2) - (ref_upsamples / 2)))
+        ref_end_idx = ref_start_idx + ref_upsamples - 1
+        nonref_end_idx = ref_start_idx + nonref_upsamples - 1
+
+        w0_select = w0_upsamp[ref_start_idx : ref_end_idx + 1]
+        w1_select = w1_upsamp[ref_start_idx : nonref_end_idx + 1]
+        w2_select = w2_upsamp[ref_start_idx : nonref_end_idx + 1]
+        w3_select = w3_upsamp[ref_start_idx : nonref_end_idx + 1]
+        t_ref_select = t_up[ref_start_idx : ref_end_idx + 1]
+        t_nonref_select = t_up[ref_start_idx : nonref_end_idx + 1]
+
+        cc1 = np.correlate(w0_select, w1_select, mode='full')
+        cc2 = np.correlate(w0_select, w2_select, mode='full')
+        cc3 = np.correlate(w0_select, w3_select, mode='full')
+        corr_start = t_ref_select[0] - t_nonref_select[-1]
+        corr_end = t_ref_select[-1] - t_ref_select[0] + upsamp_step
+        t_corr = np.arange(corr_start, corr_end, upsamp_step)
+        print len(cc1), len(t_corr)
+
+        fig.clf()
+        ax1 = fig.add_subplot(511)
+        ax2 = fig.add_subplot(512)
+        ax3 = fig.add_subplot(513)
+        ax4 = fig.add_subplot(514)
+        ax5 = fig.add_subplot(515)
+        axarr = [ax1, ax2, ax3, ax4, ax5]
+
+        axarr[0].plot(t_up, w1_upsamp, 'r', t_up, w2_upsamp, 'g', t_up, w3_upsamp, 'b', t_up, w0_upsamp, 'k')
+        axarr[0].axis([0, 60*samp_period, -3, 3])
+        axarr[0].set_title('Signals')
+
+        axarr[1].plot(t_nonref_select, w1_select, 'r',
+                      t_nonref_select, w2_select, 'g',
+                      t_nonref_select, w3_select, 'b',
+                      t_ref_select, w0_select, 'k')
+        axarr[1].set_title('Selected portions')
+
+        axarr[2].plot(t_corr, cc1)
+        axarr[2].set_title('Hydrophone 1 cross-correlation')
+        axarr[3].plot(t_corr, cc2)
+        axarr[3].set_title('Hydrophone 2 cross-correlation')
+        axarr[4].plot(t_corr, cc3)
+        axarr[4].set_title('Hydrophone 3 cross-correlation')
+
+        plt.draw()
+        plt.pause(0.1)
+
+        return [0, 0, 0, 0]  # DBG
+
+    def max_delta_t(self, hydrophone_idx_a, hydrophone_idx_b):
+        a = self.hydrophone_locations[hydrophone_idx_a]
+        b = self.hydrophone_locations[hydrophone_idx_b]
+        dist = la.norm(a - b)
+        return dist / self.c
+
+    def CRC(self, message):
+        # You may have to change the checksum type.
+        # Check the crc16 module online to see how to do that.
+        crc = crc16.crc16xmodem(message, 0xFFFF)
+        return struct.pack('>H', crc)
+
+    def check_CRC(self, message):
+        '''
+        Given a message with a checksum as the last two bytes, this will return True or False
+        if the checksum matches the given message.
+        '''
+        msg_checksum = message[-2:]
+        raw_message = message[:-2]
+        crc = crc16.crc16xmodem(raw_message, 0xFFFF)
+
+        # If the two match the message was correct
+        if crc == struct.unpack('>H', msg_checksum)[0]:
+            return True
+        else:
+            return False
+
+def delete_last_lines(n=1):
+    CURSOR_UP_ONE = '\x1b[1A'
+    ERASE_LINE = '\x1b[2K'
+    for _ in range(n):
+        sys.stdout.write(CURSOR_UP_ONE)
+        sys.stdout.write(ERASE_LINE)
+
+if __name__ == "__main__":
+    d = Sub8Sonar('LS', 1.484, rospy.get_param('~/sonar_driver/hydrophones'),
+                  "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_AH02X4IE-if00-port0",
+                  19200)
\ No newline at end of file
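The delay-estimation idea behind `raw_data_listener` can be exercised in isolation: upsample, cross-correlate, and read the time offset off the argmax of the correlation. The sketch below is a self-contained toy; the tone frequency, delay, and envelope are made-up numbers, and only `fs`, `up`, and the 57-sample window mirror the driver:

```python
#!/usr/bin/python
from __future__ import division
import numpy as np
from scipy.signal import resample

fs = 430e3 * 0.8      # sampling rate used by the driver (Hz)
up = 8                # upsampling factor used by the driver
n = 57                # samples kept per channel

t = np.arange(n) / fs
env = np.exp(-((t - t[n // 2]) / 2e-5) ** 2)     # hypothetical pulse envelope
ref = env * np.sin(2 * np.pi * 27e3 * t)         # hypothetical ~27 kHz tone
delayed = np.interp(t - 5e-6, t, ref)            # same pulse, 5 us later

cc = np.correlate(resample(ref, up * n), resample(delayed, up * n), mode='full')
lag = np.argmax(cc) - (up * n - 1)               # peak offset in upsampled samples
print 'recovered delay: %.2f us' % (-lag / (fs * up) * 1e6)
```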
diff --git a/drivers/mil_passive_sonar/multilateration/__init__.py b/drivers/mil_passive_sonar/multilateration/__init__.py
new file mode 100644
index 00000000..87a0c3ea
--- /dev/null
+++ b/drivers/mil_passive_sonar/multilateration/__init__.py
@@ -0,0 +1 @@
+from multilateration import Multilaterator, ReceiverArraySim, Pulse
diff --git a/drivers/mil_passive_sonar/multilateration/multilateration.py b/drivers/mil_passive_sonar/multilateration/multilateration.py
new file mode 100644
index 00000000..1ef865c4
--- /dev/null
+++ b/drivers/mil_passive_sonar/multilateration/multilateration.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+from __future__ import division
+import numpy as np
+import numpy.linalg as la
+from scipy import optimize
+from itertools import combinations
+
+from sub8_msgs.srv import Sonar, SonarResponse
+
+
+class Multilaterator(object):
+    '''
+    Finds the origin location of a pulse given differential times of
+    arrival to the individual sensors. c is the wave speed in the medium of operation.
+    Units:
+        Hydrophone coordinates are expected in millimeters, pulse location will be given in millimeters.
+        Timestamps are expected in microseconds. c is expected in millimeters per microsecond.
+    Note:
+        hydrophone locations should be the dict returned by rospy.get_param('~/sonar_driver/hydrophones')
+    '''
+    def __init__(self, hydrophone_locations, c, method):  # speed in millimeters/microsecond
+        self.hydrophone_locations = []
+        for key in hydrophone_locations:
+            sensor_location = np.array([hydrophone_locations[key]['x'], hydrophone_locations[key]['y'], hydrophone_locations[key]['z']])
+            self.hydrophone_locations += [sensor_location]
+        self.pairs = list(combinations(range(len(hydrophone_locations)), 2))
+        self.c = c
+        self.method = method
+        print "\x1b[32mSpeed of Sound (c):", self.c, "millimeter/microsecond\x1b[0m"
+
+    def getPulseLocation(self, timestamps, method=None):
+        '''
+        Returns a ros message with the location and time of emission of a pinger pulse.
+        '''
+        try:
+            if method is None:
+                method = self.method
+            # print "\x1b[32mMultilateration algorithm:", method, "\x1b[0m"
+            assert len(self.hydrophone_locations) == len(timestamps)
+            source = None
+            if method == 'bancroft':
+                source = self.estimate_pos_bancroft(timestamps)
+            elif method == 'LS':
+                source = self.estimate_pos_LS(timestamps)
+            else:
+                print method, "is not an available multilateration algorithm"
+                return
+            response = SonarResponse()
+            response.x = source[0]
+            response.y = source[1]
+            response.z = source[2]
+            print "Reconstructed Pulse:\n\t" + "x: " + str(response.x) + " y: " + str(response.y) \
+                + " z: " + str(response.z) + " (mm)"
+            return response
+        except KeyboardInterrupt:
+            print "Source localization interrupted, returning all zeroes."
+            response = SonarResponse()
+            response.x, response.y, response.z = (0, 0, 0)
+            return response
+
+    def estimate_pos_bancroft(self, reception_times):
+        N = len(reception_times)
+        assert N >= 4
+
+        L = lambda a, b: a[0]*b[0] + a[1]*b[1] + a[2]*b[2] - a[3]*b[3]
+
+        def get_B(delta):
+            B = np.zeros((N, 4))
+            for i in xrange(N):
+                B[i] = np.concatenate([self.hydrophone_locations[i]/(self.c), [-reception_times[i]]]) + delta
+            return B
+
+        delta = min([.1*np.random.randn(4) for i in xrange(10)], key=lambda delta: np.linalg.cond(get_B(delta)))
+        # delta = np.zeros(4)  # gives very good heading for noisy timestamps, although range is completely unreliable
+
+        B = get_B(delta)
+        a = np.array([0.5 * L(B[i], B[i]) for i in xrange(N)])
+        e = np.ones(N)
+
+        Bpe = np.linalg.lstsq(B, e)[0]
+        Bpa = np.linalg.lstsq(B, a)[0]
+
+        Lambdas = quadratic(
+            L(Bpe, Bpe),
+            2*(L(Bpa, Bpe) - 1),
+            L(Bpa, Bpa))
+        if not Lambdas:
+            return [0, 0, 0]
+
+        res = []
+        for Lambda in Lambdas:
+            u = Bpa + Lambda * Bpe
+            position = u[:3] - delta[:3]
+            time = u[3] + delta[3]
+            if any(reception_times[i] < time for i in xrange(N)): continue
+            res.append(position*self.c)
+        if len(res) == 1:
+            source = res[0]
+        elif len(res) == 2:
+            source = [x for x in res if x[2] < 0]  # Assume that the source is below us
+            if not source:
+                source = res[0]
+            else:
+                source = source[0]
+        else:
+            source = [0, 0, 0]
+        return source
+
+    def estimate_pos_LS(self, timestamps):
+        '''
+        Returns the estimated pulse origin [x, y, z] (mm) from a least-squares fit of the DTOA residuals.
+        '''
+        self.timestamps = timestamps
+        init_guess = np.random.normal(0, 100, 3)
+        opt = {'disp': 0}
+        opt_method = 'Powell'
+        result = optimize.minimize(self.cost_LS, init_guess, method=opt_method, options=opt, tol=1e-15)
+        if result.success:
+            source = [result.x[0], result.x[1], result.x[2]]
+        else:
+            source = [0, 0, 0]
+        return source
+
+    def cost_LS(self, potential_pulse):
+        '''
+        Compares the difference in observed and theoretical difference in time of arrival
+        between every unique pair of hydrophones.
+
+        Note: the result will be along the direction of the heading but not at the right distance.
+        '''
+        cost = 0
+        t = self.timestamps
+        c = self.c
+        x = np.array(potential_pulse)
+        for pair in self.pairs:
+            h0 = self.hydrophone_locations[pair[0]]
+            h1 = self.hydrophone_locations[pair[1]]
+            residual = la.norm(x - h0) - la.norm(x - h1) - c*(t[pair[0]] - t[pair[1]])
+            cost += residual**2
+        return cost
+
+    def cost_LS2(self, potential_pulse):
+        """
+        Slightly less accurate than the one above in terms of heading but much faster.
+        """
+        cost = 0
+        t = self.timestamps
+        x0 = self.hydrophone_locations[0][0]
+        y0 = self.hydrophone_locations[0][1]
+        z0 = self.hydrophone_locations[0][2]
+        x = potential_pulse[0]
+        y = potential_pulse[1]
+        z = potential_pulse[2]
+        d0 = np.sqrt((x0 - x)**2 + (y0 - y)**2 + (z0 - z)**2)
+        for i in xrange(1, len(self.hydrophone_locations)):
+            xi = self.hydrophone_locations[i][0]
+            yi = self.hydrophone_locations[i][1]
+            zi = self.hydrophone_locations[i][2]
+            di = np.sqrt((xi - x)**2 + (yi - y)**2 + (zi - z)**2)
+            hydro_i_cost = (di - d0 - self.c * t[i])**2
+            cost = cost + hydro_i_cost
+        return cost
+
+
+class ReceiverArraySim(object):
+    """
+    Simulates an array of receivers that listens to point sources and returns the DTOA.
+    (difference in time of arrival)
+    Base Units:
+        time - microseconds
+        length - millimeters
+    """
+    def __init__(self, hydrophone_locations, wave_propagation_speed):
+        self.c = wave_propagation_speed  # millimeters/microsecond
+        self.hydrophone_locations = np.array([0, 0, 0])
+        for key in hydrophone_locations:
+            sensor_location = np.array([hydrophone_locations[key]['x'], hydrophone_locations[key]['y'], hydrophone_locations[key]['z']])
+            self.hydrophone_locations = np.vstack((self.hydrophone_locations, sensor_location))
+        self.hydrophone_locations = self.hydrophone_locations[1:]
+
+    def listen(self, pulse):
+        timestamps = []
+        for idx in range(4):
+            src_range = np.sqrt(sum(np.square(pulse.position() - self.hydrophone_locations[idx])))
+            timestamps += [pulse.t + src_range / self.c]
+        return np.array(timestamps)
+
+class Pulse(object):
+    """
+    Represents an omnidirectional wave or impulse emitted from a point source
+    """
+    def __init__(self, x, y, z, t):
+        self.x = x
+        self.y = y
+        self.z = z
+        self.t = t
+
+    def position(self):
+        return np.array([self.x, self.y, self.z])
+
+    def __repr__(self):
+        return "Pulse:\t" + "x: " + str(self.x) + " y: " + str(self.y) + " z: " \
+            + str(self.z) + " (mm)"
+
+
+def quadratic(a, b, c):
+    discriminant = b*b - 4*a*c
+    if discriminant >= 0:
+        first_times_a = (-b + np.copysign(np.sqrt(discriminant), -b))/2
+        return [first_times_a/a, c/first_times_a]
+    else:
+        return []
\ No newline at end of file
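As a quick sanity check of this API (nodes/sonar_test.py below exercises it more thoroughly), the simulator and solver can be wired together directly. The array geometry here is hypothetical, and the pulse coordinates are the ones from sonar_test.py's commented-out debug block:

```python
#!/usr/bin/python
import numpy as np
from multilateration import Multilaterator, ReceiverArraySim, Pulse

# Hypothetical array; coordinates in mm, keyed like the rosparam dict.
hydrophone_locations = {
    'hydro0': {'x': 0, 'y': 0, 'z': 0},
    'hydro1': {'x': 25.4, 'y': 0, 'z': 0},
    'hydro2': {'x': -25.4, 'y': 0, 'z': 0},
    'hydro3': {'x': 0, 'y': 25.4, 'z': 0},
}
c = 1.484  # mm/us

hydrophone_array = ReceiverArraySim(hydrophone_locations, c)
sonar = Multilaterator(hydrophone_locations, c, 'LS')

pulse = Pulse(-5251, -7620, 1470, 0)
tstamps = hydrophone_array.listen(pulse)
tstamps = tstamps - tstamps[0]   # convert TOA to DTOA, as the driver does
response = sonar.getPulseLocation(np.array(tstamps))
print response.x, response.y, response.z
```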
diff --git a/drivers/mil_passive_sonar/nodes/pinger_finder.py b/drivers/mil_passive_sonar/nodes/pinger_finder.py
new file mode 100755
index 00000000..88d2faf8
--- /dev/null
+++ b/drivers/mil_passive_sonar/nodes/pinger_finder.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+import rospy
+import rospkg
+import numpy as np
+from numpy import linalg as npl
+import tf
+from navigator_msgs.srv import FindPinger, SetFrequency, SetFrequencyResponse
+from hydrophones.msg import ProcessedPing
+from geometry_msgs.msg import Point, PoseStamped, Pose
+from visualization_msgs.msg import Marker
+from std_srvs.srv import SetBool, SetBoolResponse
+import navigator_tools
+
+class PingerFinder(object):
+    """
+    This class will find the position of a pinger at a given frequency. For it
+    to find a good solution, it needs to get observations from at least 2 sufficiently
+    different positions. Observations are considered valid if they fall within
+    self.freq_tol of self.target_freq, where self.target_freq can be set
+    using 'rosservice call /hydrophones/set_freq "frequency: ____"'.
+
+    The default behaviour is to not store observations. To start recording valid
+    observations, run 'rosservice call /hydrophones/set_listen "data: true"'. Remember
+    to run 'rosservice call /hydrophones/set_listen "data: false"' to stop recording
+    observations. For best results, it is best to only record observations while the
+    vehicle is station-keeping.
+
+    When running, this class will publish arrow markers to /visualization_marker and
+    a green cube marker representing the estimated position of the pinger whenever the
+    /hydrophones/find_pinger service is called.
+    """
+
+    def __init__(self):
+        self.map_frame = "/enu"
+        self.hydrophone_frame = "/hydrophones"
+        self.tf_listener = tf.TransformListener()
+        self.target_freq = 35E3
+        self.freq_tol = 1E3
+        self.min_amp = 200  # if too many outliers, increase this
+        self.max_observations = 200
+        self.line_array = np.empty((0, 4), float)
+        self.observation_amplitudes = np.empty((0, 0), float)
+        self.pinger_position = Point(0, 0, 0)
+        self.heading_pseudorange = 10
+        self.debug_arrow_id = 0
+        self.debug_arrow_lifetime = rospy.Duration(10, 0)
+        self.listen = False
+        self.ping_sub = rospy.Subscriber("/hydrophones/processed", ProcessedPing, callback=self.ping_cb)
+        self.marker_pub = rospy.Publisher("/visualization_marker", Marker, queue_size=10)
+        self.pinger_finder_srv = rospy.Service('hydrophones/find_pinger', FindPinger, self.find_pinger)
+        self.activate_listening = rospy.Service('hydrophones/set_listen', SetBool, self.set_listen)
+        self.set_freq_srv = rospy.Service('hydrophones/set_freq', SetFrequency, self.set_freq)
+
+    def ping_cb(self, ping):
+        print rospkg.get_ros_package_path()
+        print "PINGERFINDER: freq={p.freq:.0f} amp={p.amplitude:.0f}".format(p=ping)
+        if abs(ping.freq - self.target_freq) < self.freq_tol and ping.amplitude > self.min_amp and self.listen:
+            trans, rot = None, None
+            try:
+                self.tf_listener.waitForTransform(self.map_frame, self.hydrophone_frame, ping.header.stamp, rospy.Duration(0.25))
+                trans, rot = self.tf_listener.lookupTransform(self.map_frame, self.hydrophone_frame, ping.header.stamp)
+            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
+                print e
+                return
+            p0 = np.array([trans[0], trans[1]])
+            R = tf.transformations.quaternion_matrix(rot)[:3, :3]
+            delta = R.dot(navigator_tools.point_to_numpy(ping.position))[:2]
+            p1 = p0 + self.heading_pseudorange * delta / npl.norm(delta)
+            line_coeffs = np.array([[p0[0], p0[1], p1[0], p1[1]]])  # p0 and p1 define a line
+            self.visualize_arrow(Point(p0[0], p0[1], 0), Point(p1[0], p1[1], 0))
+            self.line_array = np.append(self.line_array, line_coeffs, 0)
+            self.observation_amplitudes = np.append(self.observation_amplitudes, ping.amplitude)
+            if len(self.line_array) >= self.max_observations:  # delete softest samples if we have over max_observations
+                softest_idx = np.argmin(self.observation_amplitudes)
+                self.line_array = np.delete(self.line_array, softest_idx, axis=0)
+                self.observation_amplitudes = np.delete(self.observation_amplitudes, softest_idx)
+            print "PINGERFINDER: Observation collected. Total: {}".format(self.line_array.shape[0])
+
+    def LS_intersection(self):
+        """
+        self.line_array represents a system of 2d line equations. Each row represents a different
+        observation of a line in map frame on which the pinger lies. Row structure: [x1, y1, x2, y2]
+        Calculates the point in the plane with the least cumulative distance to every line
+        in self.line_array. For more information, see:
+        https://en.wikipedia.org/wiki/Line-line_intersection#In_two_dimensions_2
+        """
+
+        def line_segment_norm(line_end_pts):
+            assert len(line_end_pts) == 4
+            return npl.norm(line_end_pts[2:] - line_end_pts[:2])
+
+        begin_pts = self.line_array[:, :2]
+        diffs = self.line_array[:, 2:4] - begin_pts
+        norms = np.apply_along_axis(line_segment_norm, 1, self.line_array).reshape(diffs.shape[0], 1)
+        rot_left_90 = np.array([[0, -1], [1, 0]])
+        perp_unit_vecs = np.apply_along_axis(lambda unit_diffs: rot_left_90.dot(unit_diffs), 1, diffs / norms)
+        A_sum = np.zeros((2, 2))
+        Ap_sum = np.zeros((2, 1))
+
+        for x, y in zip(begin_pts, perp_unit_vecs):
+            begin = x.reshape(2, 1)
+            perp_vec = y.reshape(2, 1)
+            A = perp_vec.dot(perp_vec.T)
+            Ap = A.dot(begin)
+            A_sum += A
+            Ap_sum += Ap
+
+        res = npl.inv(A_sum).dot(Ap_sum)
+        self.pinger_position = Point(res[0], res[1], 0)
+        return self.pinger_position
+
+    def find_pinger(self, srv_request):
+        if self.line_array.shape[0] < 2:
+            print "PINGER_FINDER: Can't locate pinger. Less than two valid observations have been recorded."
+            return {}
+        res = {'pinger_position': self.LS_intersection(), 'num_samples': self.line_array.shape[0]}
+        self.visualize_pinger()
+        return res
+
+    def visualize_pinger(self):
+        marker = Marker()
+        marker.ns = "pinger{}".format(self.target_freq)
+        marker.header.stamp = rospy.Time(0)
+        marker.header.frame_id = self.map_frame
+        marker.type = marker.CUBE
+        marker.action = marker.ADD
+        marker.scale.x = 1.0
+        marker.scale.y = 1.0
+        marker.scale.z = 1.0
+        marker.color.g = 1.0
+        marker.color.a = 0.75
+        marker.pose.position = self.pinger_position
+        if self.pinger_position != Point(0, 0, 0):
+            self.marker_pub.publish(marker)
+            print "PINGERFINDER: position: ({p.x[0]:.2f}, {p.y[0]:.2f})".format(p=self.pinger_position)
+
+    def visualize_arrow(self, tail, head):
+        marker = Marker()
+        marker.ns = "pinger{}/heading".format(self.target_freq)
+        marker.header.stamp = rospy.Time(0)
+        marker.header.frame_id = self.map_frame
+        marker.id = self.debug_arrow_id
+        self.debug_arrow_id += 1
+        marker.type = marker.ARROW
+        marker.action = marker.ADD
+        marker.lifetime = self.debug_arrow_lifetime
+        marker.points.append(tail)
+        marker.points.append(head)
+        marker.scale.x = 0.5
+        marker.scale.y = 1.0
+        marker.scale.z = 1.0
+        marker.color.g = 1.0
+        marker.color.b = 1.0
+        marker.color.a = 0.5
+        self.marker_pub.publish(marker)
+
+    def set_listen(self, listen_status):
+        self.listen = listen_status.data
+        print "PINGERFINDER: setting listening to on" if self.listen else "PINGERFINDER: setting listening to off"
+        return {'success': True, 'message': ""}
+
+    def set_freq(self, msg):
+        self.target_freq = msg.frequency
+        self.line_array = np.empty((0, 4), float)
+        self.sample_amplitudes = np.empty((0, 0), float)
+        self.pinger_position = Point(0, 0, 0)
+        return SetFrequencyResponse()
+
+if __name__ == '__main__':
+    rospy.init_node('pinger_finder')
+    pinger_finder = PingerFinder()
+    rospy.spin()
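The normal-equation solve inside LS_intersection is easy to check in isolation: each observed line through point p with unit normal n contributes a projector A = n nᵀ, and the closest point to all lines solves (Σ A) x = Σ A p. Below is a standalone restatement of the same math; the helper name and the two test lines (which cross at (1, 2)) are made up for the check:

```python
import numpy as np

def intersect(lines):
    # lines: rows of [x1, y1, x2, y2]; same solve as
    # PingerFinder.LS_intersection, without the ROS wrapping.
    rot_left_90 = np.array([[0.0, -1.0], [1.0, 0.0]])
    A_sum = np.zeros((2, 2))
    Ap_sum = np.zeros((2, 1))
    for x1, y1, x2, y2 in lines:
        p = np.array([[x1], [y1]], dtype=float)
        d = np.array([x2 - x1, y2 - y1], dtype=float)
        n = rot_left_90.dot(d / np.linalg.norm(d)).reshape(2, 1)
        A = n.dot(n.T)
        A_sum += A
        Ap_sum += A.dot(p)
    return np.linalg.solve(A_sum, Ap_sum).ravel()

# One horizontal line and one 45-degree line, both through (1, 2).
print intersect([[0, 2, 5, 2], [0, 1, 2, 3]])   # -> [ 1.  2.]
```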
diff --git a/drivers/mil_passive_sonar/nodes/plotter.py b/drivers/mil_passive_sonar/nodes/plotter.py
new file mode 100755
index 00000000..93d8f7ad
--- /dev/null
+++ b/drivers/mil_passive_sonar/nodes/plotter.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+import rospy
+import rosparam
+
+import numpy as np
+from scipy import optimize
+
+from sub8_msgs.srv import Sonar, SonarResponse
+
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+fig = plt.figure()
+ax = fig.add_subplot(111, projection='3d')
+
+print "Waiting for sonar service"
+rospy.wait_for_service('~/sonar/get_pinger_pulse')
+sonar = rospy.ServiceProxy('~/sonar/get_pinger_pulse', Sonar)
+print "Sonar service proxy created"
+
+try:
+    while True:
+        try:
+            pinger_pose = sonar()
+        except Exception:
+            plt.close('all')
+            print "\nSonar driver is down, shutting down plotter"
+            break
+        print "x:", str(pinger_pose.x).rjust(15), "y:", str(pinger_pose.y).rjust(15), "z:", str(pinger_pose.z).rjust(15)
+        ax.scatter(pinger_pose.x, pinger_pose.y, pinger_pose.z)
+        plt.draw()
+        plt.pause(0.1)
+except KeyboardInterrupt:
+    plt.close('all')
+    print "\nshutting down plotter"
\ No newline at end of file
diff --git a/drivers/mil_passive_sonar/nodes/sonar_test.py b/drivers/mil_passive_sonar/nodes/sonar_test.py
new file mode 100755
index 00000000..d4aa9108
--- /dev/null
+++ b/drivers/mil_passive_sonar/nodes/sonar_test.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+from __future__ import division
+import numpy as np
+import numpy.linalg as la
+from scipy import optimize
+import rospy
+import rosparam
+import random
+from multilateration import Multilaterator, ReceiverArraySim, Pulse
+import sys
+
+
+if __name__ == '__main__':
+    def print_green(text):
+        print '\x1b[32m' + text + '\x1b[0m'
+
+    def error(obs, exp):
+        # Interesting, faster, but not as accurate
+        # alpha = np.arccos(np.clip(np.dot(obs/la.norm(obs),exp/la.norm(exp)),-1,1))*180/np.pi
+        alpha = 2*np.arctan2(la.norm(la.norm(exp)*obs-la.norm(obs)*exp), la.norm(la.norm(exp)*obs+la.norm(obs)*exp))
+        mag_error = 100 * (la.norm(obs) - la.norm(exp)) / la.norm(exp)
+        return ('\x1b[31m' if (mag_error == -100) else "") + ("Errors: directional=" + str(alpha) + "deg").ljust(42) \
+            + ("magnitude=" + str(mag_error) + "%").ljust(20)
+
+    def delete_last_lines(n=0):
+        CURSOR_UP_ONE = '\x1b[1A'
+        ERASE_LINE = '\x1b[2K'
+        for _ in range(n):
+            sys.stdout.write(CURSOR_UP_ONE)
+            sys.stdout.write(ERASE_LINE)
+
+    c = 1.484  # millimeters/microsecond
+    hydrophone_locations = rospy.get_param('~/sonar_test/hydrophones')
+    hydrophone_array = ReceiverArraySim(hydrophone_locations, c)
+    sonar = Multilaterator(hydrophone_locations, c, 'LS')
+
+    # # Simulate individual pulses (Debugging Jakes Board)
+    # pulse = Pulse(-5251, -7620, 1470, 0)
+    # tstamps = hydrophone_array.listen(pulse)
+    # tstamps = tstamps - tstamps[0]
+    # print_green(pulse.__repr__())
+    # print "Perfect timestamps: (microseconds)\n\t", tstamps
+    # res_msg = sonar.getPulseLocation(np.array(tstamps))
+    # res = np.array([res_msg.x, res_msg.y, res_msg.z])
+    # print "\t\x1b[33m".ljust(22) + error(res, pulse.position()) + "\x1b[0m"
+
+    # pulses will be generated inside a cube with side-length $(pulse_range) (mm)
+    try:
+        for h in range(3, 8):
+            # smallest cube will be a meter wide, largest will be 10 km wide
+            pulse_range = 10**h  # in mm
+            rand_args = [-pulse_range, pulse_range + 1]
+            num_pulses = 10
+            print "\n\x1b[1mGenerating " + str(num_pulses) + " pulses within a " \
+                + str(2*pulse_range/1000) + " meters wide cube\x1b[0m\n"
+
+            for i in range(num_pulses):
+                pulse = Pulse(random.randrange(*rand_args),
+                              random.randrange(*rand_args),
+                              random.randrange(*rand_args), 0)
+                tstamps = hydrophone_array.listen(pulse)
+                tstamps = tstamps - tstamps[0]
+                print_green(str(i).ljust(2) + str(pulse))
+                print "Perfect timestamps: (microseconds)\n\t", tstamps
+                res_msg = sonar.getPulseLocation(np.array(tstamps))
+                delete_last_lines(4)  # more concise output
+                res = np.array([res_msg.x, res_msg.y, res_msg.z])
+                print "\t\x1b[33m".ljust(22) + error(res, pulse.position()) + "\x1b[0m"
+                print "Progressively adding noise to timestamps..."
+
+                for j in range(-5, 2):
+                    sigma = 10**j
+                    noisy_tstamps = [x + np.random.normal(0, sigma) for x in tstamps]
+                    noisy_tstamps[0] = 0
+                    print "Noisy timestamps:\n\t", noisy_tstamps
+                    res_msg = sonar.getPulseLocation(np.array(noisy_tstamps))
+                    res = np.array([res_msg.x, res_msg.y, res_msg.z])
+                    delete_last_lines(4)  # more concise output
+                    print "\t\x1b[33m" + ("sigma: " + str(sigma)).ljust(16) \
+                        + error(res, pulse.position()) + "\x1b[0m"
+    except KeyboardInterrupt:
+        print "\nAborting multilateration tests prematurely"
diff --git a/drivers/mil_passive_sonar/package.xml b/drivers/mil_passive_sonar/package.xml
new file mode 100644
index 00000000..f3f79469
--- /dev/null
+++ b/drivers/mil_passive_sonar/package.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+<package>
+  <name>mil_passive_sonar</name>
+  <version>1.0.0</version>
+  <description>Package for multilateration and passive sonar</description>
+
+  <maintainer>Matthew Langford</maintainer>
+  <maintainer>David Soto</maintainer>
+
+  <license>MIT</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>rospy</build_depend>
+  <run_depend>rospy</run_depend>
+
+</package>
diff --git a/drivers/mil_passive_sonar/paulboard_driver/README.md b/drivers/mil_passive_sonar/paulboard_driver/README.md
new file mode 100644
index 00000000..14c960ce
--- /dev/null
+++ b/drivers/mil_passive_sonar/paulboard_driver/README.md
@@ -0,0 +1,19 @@
+# Paulboard Driver
+
+The scripts here are used to run the old 'Paulboard', the hydrophone board
+that MIL started using in 2013.
+
+The driver node in this package used to interface with the old 'hydrophone'
+package. Communication would occur via custom ROS msgs.
+
+## TODO
+If there is a need or desire to continue using this board, as opposed to Jake
+Easterling's, then the sonar_driver node in mil_passive_sonar should be
+amended to be able to import the paulboard_driver code.
+
+## Explanation of `permute` parameter
+
+A `permute` value of `w x y z` means that the connector labeled Aw on the PCB
+maps to hydrophone 0 as the hydrophones package wants it, Ax maps to 1, Ay
+maps to 2, and Az maps to 3.
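To make the mapping concrete, this is how the driver below applies `permute` to the 4xN sample matrix (the wiring string here is hypothetical):

```python
import numpy as np

# Row i of the output is the PCB connector named by the i-th permute entry,
# so hydrophone 0 receives the channel from connector Aw, and so on.
permute_str = '3 4 2 1'                  # hypothetical wiring
permute = [int(v) - 1 for v in permute_str.split(' ')]

samples = np.arange(8).reshape(4, 2)     # fake data: rows are A1..A4
print samples[permute, :]                # rows reordered to A3, A4, A2, A1
```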
+
diff --git a/drivers/mil_passive_sonar/paulboard_driver/SimpleHyd2013.bin b/drivers/mil_passive_sonar/paulboard_driver/SimpleHyd2013.bin
new file mode 100644
index 00000000..4b4c50a0
Binary files /dev/null and b/drivers/mil_passive_sonar/paulboard_driver/SimpleHyd2013.bin differ
diff --git a/drivers/mil_passive_sonar/paulboard_driver/__init__.py b/drivers/mil_passive_sonar/paulboard_driver/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/drivers/mil_passive_sonar/paulboard_driver/ais_bootloader.py b/drivers/mil_passive_sonar/paulboard_driver/ais_bootloader.py
new file mode 100755
index 00000000..1ac1dcd8
--- /dev/null
+++ b/drivers/mil_passive_sonar/paulboard_driver/ais_bootloader.py
@@ -0,0 +1,147 @@
+import logging
+import serial
+import time
+
+MAGICWORD = 0x41504954
+XMT_START_WORD = 0x58535441
+RCV_START_WORD = 0x52535454
+
+OP_BOOTTABLE = 0x58535907
+OP_SEQREADEN = 0x58535963
+OP_SECTIONLOAD = 0x58535901
+OP_CSECTIONLOAD = 0x58535909
+OP_SECTIONFILL = 0x5853590A
+OP_FXNEXEC = 0x5853590D
+OP_JUMP = 0x58535905
+OP_JUMPCLOSE = 0x58535906
+OP_CRCEN = 0x58535903
+OP_CRCDIS = 0x58535904
+OP_CRCREQ = 0x58535902
+OP_READWAIT = 0x58535914
+OP_STARTOVER = 0x58535908
+OP_PINGDEVICE = 0x5853590B
+
+def op2ack(op):
+    return (op & ~0x0F000000) | 0x02000000
+
+class Exception(RuntimeError):  # intentionally shadows the builtin; caught as ais_bootloader.Exception
+    pass
+
+def delay():
+    time.sleep(5 / 1000.0)  # 5 ms; plain 5/1000 is 0 under Python 2 integer division
+
+def word2str(word):
+    return chr(word & 0xFF) + \
+        chr((word >> 8) & 0xFF) + \
+        chr((word >> 16) & 0xFF) + \
+        chr((word >> 24) & 0xFF)
+
+def str2word(s):
+    vals = map(ord, s)
+    return vals[0] + (vals[1] << 8) + (vals[2] << 16) + (vals[3] << 24)
+
+def read_word_timeout(ser):
+    s = ser.read(4)
+    if len(s) < 4:
+        raise Exception('Timeout while reading word')
+    return str2word(s)
+
+def sws(ser):
+    """Start word sync"""
+    logging.debug('Performing SWS')
+    ser.write(chr(XMT_START_WORD >> 24))
+    while True:
+        response = ser.read(1)
+        if len(response) == 0:
+            raise Exception('Timeout during start word sync')
+        if ord(response[0]) == (RCV_START_WORD >> 24):
+            return True
+
+def pos(ser, N=10):
+    """Ping opcode sync"""
+    logging.debug('Performing POS')
+    ser.write(word2str(OP_PINGDEVICE))
+    if read_word_timeout(ser) != op2ack(OP_PINGDEVICE):
+        raise Exception('Invalid response to ping opcode')
+    delay()
+
+    ser.write(word2str(N))
+    if read_word_timeout(ser) != N:
+        raise Exception('Invalid response to ping count')
+
+    for i in xrange(1, N+1):
+        ser.write(word2str(i))
+        if read_word_timeout(ser) != i:
+            raise Exception('Invalid response to ping %d' % i)
+    return True
+
+def os(ser, op):  # opcode sync; shadows the os module, which is not imported here
+    ser.write(word2str(op))
+    response = read_word_timeout(ser)
+    if response != op2ack(op):
+        raise Exception('Invalid response to opcode (%x, expected %x)' % (response, op2ack(op)))
+
+def boot(ser, file):
+    magic = str2word(file.read(4))
+    if magic != MAGICWORD:
+        raise Exception('Invalid magic word in file')
+
+    sws(ser)
+    pos(ser)
+
+    while True:
+        delay()
+        op = str2word(file.read(4))
+        os(ser, op)
+
+        if op == OP_SECTIONLOAD or op == OP_CSECTIONLOAD:
+            addr = str2word(file.read(4))
+            ser.write(word2str(addr))
+            size = str2word(file.read(4))
+            ser.write(word2str(size))
+            logging.debug('SECTIONLOAD of %d bytes to 0x%x' % (size, addr))
+            ser.write(file.read(size))
+        elif op == OP_FXNEXEC:
+            args = str2word(file.read(4))
+            ser.write(word2str(args))
+            words = (args >> 16)*4
+            logging.debug('FXNEXEC of %d bytes' % words)
+            ser.write(file.read(words))
+        elif op == OP_JUMPCLOSE:
+            addr = str2word(file.read(4))
+            logging.debug('JUMPCLOSE to 0x%x' % addr)
+            ser.write(word2str(addr))
+            break
+        elif op == OP_CRCEN:
+            logging.debug('Enabled CRC')
+        elif op == OP_CRCDIS:
+            logging.debug('Disabled CRC')
+        elif op == OP_CRCREQ:
+            calc_crc = read_word_timeout(ser)
+            real_crc = str2word(file.read(4))
+            seek = str2word(file.read(4))
+            if seek > 0x7FFFFFFF:
+                seek = (seek-1) - 0xFFFFFFFF
+            if real_crc == calc_crc:
+                logging.debug('CRC passed')
+            else:
+                logging.debug('CRC failed')
+                file.seek(seek, 1)
+        else:
+            raise Exception('Unknown opcode 0x%x' % op)
+
+    donestr = ser.read(8)
+    if donestr != " DONE\0":
+        raise Exception('Invalid done string: %s' % donestr)
+
+    return True
+
+def main():
+    ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
+    with open('SimpleHyd2013.bin') as file:
+        boot(ser, file)
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.DEBUG)
+    main()
+
diff --git a/drivers/mil_passive_sonar/paulboard_driver/paulboard_driver b/drivers/mil_passive_sonar/paulboard_driver/paulboard_driver
new file mode 100755
index 00000000..d5dcbafd
--- /dev/null
+++ b/drivers/mil_passive_sonar/paulboard_driver/paulboard_driver
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+
+import serial
+import numpy
+
+import roslib
+roslib.load_manifest('paulboard_driver')
+import rospy
+
+from std_msgs.msg import Header
+
+from paulboard_driver import ais_bootloader
+from hydrophones.msg import Ping
+from hydrophones import util
+
+class Node(object):
+    def __init__(self, ser):
+        self.sample_rate = rospy.get_param('~sample_rate', 300e3)
+        self.thresh = rospy.get_param('~thresh', 500)
+        self.frame = rospy.get_param('~frame', '/hydrophones')
+        permute_str = rospy.get_param('~permute', '1 2 3 4')
+        self.permute = [int(v)-1 for v in permute_str.split(' ')]
+        self.ser = ser
+        self.pub = rospy.Publisher('hydrophones/ping', Ping)
+        self.buf = ""
+        self.last_packet_num = None
+
+        self.ser.baudrate = 1152000
+
+    def run_command(self, cmd):
+        self.ser.timeout = .2
+        self.ser.write(cmd)
+        response = self.ser.read(128)
+        if 'OK' not in response:
+            raise RuntimeError('Invalid response to command "%s"\n got: "%s"' % (cmd, response))
+
+    def run(self):
+        self.run_command('samplerate set\r%d\r' % (self.sample_rate))
+        self.run_command('thresh set\r%d\r' % (self.thresh))
+        self.ser.write('go\r')
+        self.ser.read(99999)
+        self.ser.timeout = .1
+        rospy.loginfo('Paulboard running')
+
+        buf = ""
+        while not rospy.is_shutdown():
+            got = self.ser.read(10240)
+            if len(got) > 0:
+                buf += got
+            elif len(buf) > 0:
+                self.parse_publish_ping(buf)
+                buf = ""
+
+    def parse_publish_ping(self, buf):
+        result = self.try_parse_ping(buf)
+        if result is None:
+            rospy.logerr('Got bad ping packet')
+            return
+
+        packet_num, samples = result
+        if self.last_packet_num is not None and packet_num != self.last_packet_num+1:
+            rospy.logwarn('Dropped %d ping packet(s)' % (packet_num - (self.last_packet_num+1)))
+        self.last_packet_num = packet_num
+
+        samples = samples[self.permute, :]
+
+        self.pub.publish(Ping(
+            header=Header(stamp=rospy.Time.now(),
+                          frame_id=self.frame),
+            channels=samples.shape[0],
+            samples=samples.shape[1],
+            data=util.samples_to_list(samples),
+            sample_rate=self.sample_rate))
+
+    def try_parse_ping(self, buf):
+        # Verify we got a header
+        if len(buf) < 8:
+            return None
+        header = word16(buf[0:2])
+        if header != 0x1234:
+            return None
+
+        # Parse header, verify we have all data
+        packet_num = word16(buf[2:4])
+        sample_rate = word16(buf[4:6])*1000
+        sample_count = word16(buf[6:8])
+        if len(buf) < 8 + 8*sample_count + 4:
+            return None
+
+        # Parse all samples into an array of column vectors
+        pos = 8
+        samples = numpy.zeros((4, sample_count), numpy.uint16)
+        for sample in xrange(sample_count):
+            for chan in xrange(4):
+                samples[chan, sample] = word16(buf[pos:pos+2])
+                pos += 2
+
+        # Parse footer (the checksum is read but not verified)
+        checksum = word16(buf[pos:pos+2])
+        footer = word16(buf[pos+2:pos+4])
+        if footer != 0x4321:
+            return None
+
+        return (packet_num, samples)
+
+def word16(s):
+    return ord(s[0]) + (ord(s[1]) << 8)
+
+def check_board_bootloaded(ser):
+    ser.timeout = .5
+    ser.write('\r')
+    got = ser.read(99999)
+    if got.endswith('> '):
+        return True
+    return False
+
+def bootload_board(ser):
+    path = roslib.packages.resource_file('paulboard_driver', 'blobs', 'SimpleHyd2013.bin')
+    print 'Bin path: ', path
+    ser.timeout = 1
+    ser.baudrate = 115200
+    with open(path, 'rb') as file:
+        ais_bootloader.boot(ser, file)
+    rospy.sleep(0.5)  # Give program time to start
+
+if __name__ == '__main__':
+    rospy.init_node('paulboard_driver')
+    port = rospy.get_param('~port')
+    print 'Port: ', port
+
+    with serial.Serial(port, 115200, timeout=.3) as ser:
+        if check_board_bootloaded(ser):
+            rospy.loginfo('PaulBoard already bootloaded')
+        else:
+            rospy.loginfo('Bootloading PaulBoard')
+            while True:
+                try:
+                    bootload_board(ser)
+                    rospy.loginfo('Bootloading complete')
+                    break
+                except ais_bootloader.Exception, ex:
+                    rospy.logerr('Failed to bootload: %s' % ex)
+                    if rospy.is_shutdown():
+                        break
+                    rospy.sleep(1)
+
+        Node(ser).run()
+
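For reference, the framing that try_parse_ping expects can be reproduced and round-tripped in a few lines of Python. The payload values below are made up; all fields are little-endian 16-bit words, and (as noted above) the checksum field is not currently verified by the driver:

```python
import struct

samples = [[1, 2], [3, 4], [5, 6], [7, 8]]       # 4 channels x 2 samples
packet = struct.pack('<4H', 0x1234, 7, 300, 2)   # header, packet#, kHz, count
for s in xrange(2):                              # sample-major, channel-minor
    for chan in xrange(4):
        packet += struct.pack('<H', samples[chan][s])
packet += struct.pack('<2H', 0, 0x4321)          # checksum (unchecked), footer

def word16(s):
    return ord(s[0]) + (ord(s[1]) << 8)

assert word16(packet[0:2]) == 0x1234             # header
assert word16(packet[6:8]) == 2                  # sample_count
assert len(packet) == 8 + 8 * 2 + 4              # matches the parser's check
```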
diff --git a/drivers/mil_passive_sonar/scripts/generate_dtoa.py b/drivers/mil_passive_sonar/scripts/generate_dtoa.py
new file mode 100755
index 00000000..b23c3b68
--- /dev/null
+++ b/drivers/mil_passive_sonar/scripts/generate_dtoa.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+from multilateration import *
+import numpy as np
+import argparse
+
+h0 = {'x': 0, 'y': 0, 'z': 0}  # reference receiver, assumed at the origin
+h1 = {'x': 25.4, 'y': 0, 'z': 0}
+h2 = {'x': -25.4, 'y': 0, 'z': 0}
+h3 = {'x': 0, 'y': 25.4, 'z': 0}
+hydrophone_locations = [h1, h2, h3]
+
+def dict_to_xyz_vec(pos_dict):
+    '''
+    Converts a dictionary with 'x', 'y', and 'z' keys to a numpy array
+    '''
+    p = pos_dict
+    return np.array([p['x'], p['y'], p['z']])
+
+def get_dtoa(c, sensor_position, source):
+    '''
+    Calculates the dtoa of a signal for a sensor offset from the reference sensor at the origin
+    c should be in millimeters per microsecond
+    position params should have units of mm
+    '''
+    assert isinstance(sensor_position, np.ndarray)
+    assert isinstance(source, np.ndarray)
+    assert len(sensor_position) == 3
+    assert len(source) == 3
+    t_ref = np.linalg.norm(source) / c
+    t = np.linalg.norm(source - sensor_position) / c
+    delta_t = t_ref - t
+    # print __name__, sensor_position, source, c, t_ref, t, delta_t
+    return delta_t
+
+def get_batch_dtoa(c, sensor_positions, source):
+    '''
+    Calculates the dtoa's of a signal to a list of sensors (positions). The reference
+    is assumed to be at the origin.
+    c should be in millimeters per microsecond
+    position params should have units of mm
+    '''
+    dtoa = []
+    for x in sensor_positions:
+        dtoa.append(get_dtoa(c, x, source))
+    return dtoa
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Generates dtoa measurements given sensor and source locations',
+        usage='Pass the path to a file with source locations. Each row should have 3 columns. '
+              'The columns should have the x, y, and z coordinates of each source location, '
+              'separated by spaces or tabs. Coordinates should be in units of millimeters.')
+    parser.add_argument('path_to_input')
+    args = parser.parse_args()
+    sonar_test = np.loadtxt(args.path_to_input)
+    sensors = [dict_to_xyz_vec(x) for x in hydrophone_locations]
+    res = []
+    for case in sonar_test:
+        res.append(get_batch_dtoa(1.484, sensors, case))
+    res = np.array(res)
+    # output format is {number of input rows} rows w/ 3 columns (dtoa measurements in microseconds)
+    print res
+
diff --git a/drivers/mil_passive_sonar/setup.py b/drivers/mil_passive_sonar/setup.py
new file mode 100644
index 00000000..49a04b0c
--- /dev/null
+++ b/drivers/mil_passive_sonar/setup.py
@@ -0,0 +1,13 @@
+# ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
+
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
+
+# fetch values from package.xml
+setup_args = generate_distutils_setup(
+    packages=['mil_passive_sonar',
+              'multilateration',
+              'paulboard_driver'],
+)
+
+setup(**setup_args)
diff --git a/odometry_tools b/odometry_tools
index b23dc5d7..299eb06d 160000
--- a/odometry_tools
+++ b/odometry_tools
@@ -1 +1 @@
-Subproject commit b23dc5d74b941dd36159fce030479d9772c58113
+Subproject commit 299eb06d1c538085ac2cb86a2ff2ba070042fed0
diff --git a/perception/mil_vision/CMakeLists.txt b/perception/mil_vision/CMakeLists.txt
new file mode 100644
index 00000000..a5508733
--- /dev/null
+++ b/perception/mil_vision/CMakeLists.txt
@@ -0,0 +1,106 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(mil_vision)
+
+# set c++11 as default, override with set_target_properties()
+# if needed for specific nodes (cough..cough... PCL)
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp -g -pedantic -Wall -std=c++11 ")
+
+find_package(catkin
+  REQUIRED COMPONENTS
+    roscpp
+    rospy
+    eigen_conversions
+    rostime
+    image_transport
+    image_geometry
+    cv_bridge
+    message_generation
+    mil_msgs
+    std_msgs
+    std_srvs
+    geometry_msgs
+    sensor_msgs
+    tf
+    pcl_ros
+    tf2_sensor_msgs
+    tf2_geometry_msgs
+    mil_tools
+)
+
+find_package(PCL 1.7 REQUIRED)
+find_package(Boost REQUIRED date_time filesystem)
+
+catkin_python_setup()
+
+catkin_package(
+  INCLUDE_DIRS
+    include
+  LIBRARIES
+    mil_vision_lib
+    mil_sparsestereo
+  CATKIN_DEPENDS
+    roscpp
+    rospy
+    message_runtime
+    std_msgs
+    geometry_msgs
+    sensor_msgs
+    mil_msgs
+    mil_tools
+    pcl_ros
+  DEPENDS
+    system_lib
+    image_transport
+    image_geometry
+    cv_bridge
+    mil_msgs
+)
+
+include_directories(
+  include
+  SYSTEM
+    ${PCL_INCLUDE_DIRS}
+    ${Boost_INCLUDE_DIR}
+    ${catkin_INCLUDE_DIRS}
+)
+link_directories(${PCL_LIBRARY_DIRS})
+add_definitions(${PCL_DEFINITIONS})
+
+FILE(GLOB EXFAST_SOURCES exFAST_SparseStereo/src/sparsestereo/*)
+add_library(mil_sparsestereo SHARED ${EXFAST_SOURCES})
+target_include_directories(mil_sparsestereo PUBLIC exFAST_SparseStereo/src)
+target_link_libraries(mil_sparsestereo ${catkin_LIBRARIES} ${Boost_LIBRARIES} ${OpenCV_LIBRARIES})
+set_target_properties(mil_sparsestereo PROPERTIES COMPILE_FLAGS "-O3 -DNDEBUG -fopenmp -g -Wall -march=native -msse -msse2 -msse3 -mssse3 -msse4 -ffast-math -mfpmath=sse")
+
+add_library(
+  mil_vision_lib
+    src/mil_vision_lib/cv_utils.cc
+    src/mil_vision_lib/image_filtering.cpp
+    src/mil_vision_lib/active_contours.cpp
+    src/mil_vision_lib/colorizer/pcd_colorizer.cpp
+    src/mil_vision_lib/colorizer/single_cloud_processor.cpp
+    src/mil_vision_lib/colorizer/camera_observer.cpp
+    src/mil_vision_lib/colorizer/color_observation.cpp
+)
+target_include_directories(mil_vision_lib PUBLIC include/mil_vision_lib)
+target_link_libraries(mil_vision_lib ${catkin_LIBRARIES} ${Boost_LIBRARIES} ${PCL_COMMON_LIBRARIES})
+
+add_executable(camera_stream_demo src/camera_stream_demo.cpp)
+add_dependencies(camera_stream_demo mil_vision_lib ${catkin_EXPORTED_TARGETS})
+target_link_libraries(
+  camera_stream_demo
+    mil_vision_lib
+    ${catkin_LIBRARIES}
+    ${Boost_LIBRARIES}
+    ${OpenCV_LIBRARIES}
+)
+
+add_executable(camera_lidar_transformer src/camera_lidar_transformer.cpp)
+add_dependencies(camera_lidar_transformer mil_vision_lib ${catkin_EXPORTED_TARGETS})
+target_link_libraries(
+  camera_lidar_transformer
+    mil_vision_lib
+    ${catkin_LIBRARIES}
+    ${Boost_LIBRARIES}
+    ${OpenCV_LIBRARIES}
+)
diff --git a/perception/mil_vision/exFAST_SparseStereo/GPL.txt b/perception/mil_vision/exFAST_SparseStereo/GPL.txt
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/GPL.txt
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.
This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/perception/mil_vision/exFAST_SparseStereo/README.txt b/perception/mil_vision/exFAST_SparseStereo/README.txt
new file mode 100644
index 00000000..752d37c7
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/README.txt
@@ -0,0 +1,38 @@
+exFAST and High-Performance Sparse Stereo Matcher
+
+This source code is published under the GNU General Public License version 3,
+of which a copy is distributed with this code. Exempt from this license is the
+FAST9 detector (file: src/sparsestereo/fast9-inl.h), which was originally
+written by Edward Rosten and has only been modified by us.
+
+The source code makes heavy use of the GCC vector extensions to take advantage
+of the SSE instruction sets. Hence, the code cannot be compiled with a
+different compiler without making major modifications.
+
+We included a simple example application that demonstrates how to use our code.
+Also included is one example stereo pair with a matching camera calibration
+file.
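+
+As a side note on the GCC vector extensions mentioned above, here is a
+minimal sketch (illustrative only, not part of this code base) of how such
+vector types are declared and used; the compiler maps the ordinary operators
+onto single SSE instructions:
+
+    typedef char v16qi __attribute__ ((vector_size (16))); // one 128-bit register
+    v16qi a = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+    v16qi b = a + a; // element-wise addition, compiled to a single PADDB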
+
+Some notes about camera calibration: We use the OpenCV functions
+stereoCalibrate and stereoRectify to obtain the calibration and rectification
+data. The example application loads those settings from an XML file. The
+meaning of the individual data items is as follows:
+
+M1: Camera matrix of left camera (from stereoCalibrate)
+M2: Camera matrix of right camera (from stereoCalibrate)
+D1: Distortion coefficients for left camera (from stereoCalibrate)
+D2: Distortion coefficients for right camera (from stereoCalibrate)
+R1: Rectification transformation for left camera (from stereoRectify)
+R2: Rectification transformation for right camera (from stereoRectify)
+P1: Projection matrix for left camera (from stereoRectify)
+P2: Projection matrix for right camera (from stereoRectify)
+Q: Disparity-to-depth mapping matrix (from stereoRectify)
+size: Image size of one camera image
+
+If you use our code for any research that will be published, we kindly ask you
+to cite our paper:
+
+Konstantin Schauwecker, Reinhard Klette, and Andreas Zell. A New Feature
+Detector and Stereo Matching Method for Accurate High-Performance Sparse Stereo
+Matching. In IEEE/RSJ International Conference on Intelligent Robots and
+Systems (IROS). IEEE, October 2012.
diff --git a/perception/mil_vision/exFAST_SparseStereo/example-data/calibration.xml b/perception/mil_vision/exFAST_SparseStereo/example-data/calibration.xml
new file mode 100644
index 00000000..ccb6f2c2
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/example-data/calibration.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0"?>
+<opencv_storage>
+<M1 type_id="opencv-matrix">
+  <rows>3</rows>
+  <cols>3</cols>
+  <dt>d</dt>
+  <data>
+    6.5241357652015370e+02 0. 3.4737478196748043e+02 0.
+    6.5434070705887893e+02 2.2621555342556007e+02 0. 0. 1.</data></M1>
+<D1 type_id="opencv-matrix">
+  <rows>1</rows>
+  <cols>5</cols>
+  <dt>d</dt>
+  <data>
+    -4.3763636007329099e-01 2.8006488733035506e-01
+    4.0981386272831622e-04 -1.1888413625068292e-03
+    -1.4136761880997478e-01</data></D1>
+<M2 type_id="opencv-matrix">
+  <rows>3</rows>
+  <cols>3</cols>
+  <dt>d</dt>
+  <data>
+    6.5181248374572181e+02 0. 3.3187853747142810e+02 0.
+    6.5339335949419046e+02 2.2968726339955754e+02 0. 0. 1.</data></M2>
+<D2 type_id="opencv-matrix">
+  <rows>1</rows>
+  <cols>5</cols>
+  <dt>d</dt>
+  <data>
+    -4.3932608881272195e-01 2.9797543665860654e-01
+    -4.7999780141084688e-04 -1.0480857219477820e-03
+    -1.6575380391655467e-01</data></D2>
+<R1 type_id="opencv-matrix">
+  <rows>3</rows>
+  <cols>3</cols>
+  <dt>d</dt>
+  <data>
+    9.9964441516773250e-01 -5.8620392937120611e-03
+    2.6013068240420170e-02 5.8033365360402080e-03 9.9998044230621075e-01
+    2.3315853243672281e-03 -2.6026227329585080e-02
+    -2.1797936586566173e-03 9.9965888381517143e-01</data></R1>
+<R2 type_id="opencv-matrix">
+  <rows>3</rows>
+  <cols>3</cols>
+  <dt>d</dt>
+  <data>
+    9.9993558455087461e-01 5.2798013266097798e-03
+    -1.0047409957402902e-02 -5.3024541086745978e-03
+    9.9998345699189362e-01 -2.2292875470448703e-03
+    1.0035473547670247e-02 2.2824198766964580e-03 9.9994703851263078e-01</data></R2>
+<P1 type_id="opencv-matrix">
+  <rows>3</rows>
+  <cols>4</cols>
+  <dt>d</dt>
+  <data>
+    5.4581318104785521e+02 0. 3.3994384384155273e+02 0. 0.
+    5.4581318104785521e+02 2.2540025138854980e+02 0. 0. 0. 1. 0.</data></P1>
+<P2 type_id="opencv-matrix">
+  <rows>3</rows>
+  <cols>4</cols>
+  <dt>d</dt>
+  <data>
+    5.4581318104785521e+02 0. 3.3994384384155273e+02
+    -5.9573462113779023e+01 0. 5.4581318104785521e+02
+    2.2540025138854980e+02 0. 0. 0. 1. 0.</data></P2>
+<Q type_id="opencv-matrix">
+  <rows>4</rows>
+  <cols>4</cols>
+  <dt>d</dt>
+  <data>
+    1. 0. 0. -3.3994384384155273e+02 0. 1. 0. -2.2540025138854980e+02 0.
+    0. 0. 5.4581318104785521e+02 0. 0. -9.1620188198129178e+00 0.</data></Q>
+<size type_id="opencv-matrix">
+  <rows>2</rows>
+  <cols>1</cols>
+  <dt>i</dt>
+  <data>
+    640 480</data></size>
+</opencv_storage>
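For reference, a minimal sketch of reading the calibration file above with
OpenCV's cv::FileStorage (the file path and variable choices here are
illustrative, not part of the patch; CalibrationResult in
src/sparsestereo/calibrationresult.cpp does the same for all fields):

    #include <opencv2/core/core.hpp>
    #include <iostream>

    int main() {
        // Open the XML produced by stereoCalibrate/stereoRectify
        cv::FileStorage fs("calibration.xml", cv::FileStorage::READ);
        if(!fs.isOpened())
            return 1;

        cv::Mat M1, D1, Q;
        fs["M1"] >> M1; // 3x3 left camera matrix (dt "d" = double)
        fs["D1"] >> D1; // 1x5 left distortion coefficients
        fs["Q"] >> Q;   // 4x4 disparity-to-depth mapping matrix

        std::cout << "Left focal length: " << M1.at<double>(0, 0) << std::endl;
        fs.release();
        return 0;
    }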
diff --git a/perception/mil_vision/exFAST_SparseStereo/example-data/left.png b/perception/mil_vision/exFAST_SparseStereo/example-data/left.png
new file mode 100644
index 00000000..0c5b9a18
Binary files /dev/null and b/perception/mil_vision/exFAST_SparseStereo/example-data/left.png differ
diff --git a/perception/mil_vision/exFAST_SparseStereo/example-data/right.png b/perception/mil_vision/exFAST_SparseStereo/example-data/right.png
new file mode 100644
index 00000000..dfdcd941
Binary files /dev/null and b/perception/mil_vision/exFAST_SparseStereo/example-data/right.png differ
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/example/example.cpp b/perception/mil_vision/exFAST_SparseStereo/src/example/example.cpp
new file mode 100644
index 00000000..4c6426eb
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/example/example.cpp
@@ -0,0 +1,144 @@
+/*
+ * Author: Konstantin Schauwecker
+ * Year: 2012
+ */
+
+// This is a minimalistic example on how to use the extended
+// FAST feature detector and the sparse stereo matcher.
+
+#include <opencv2/opencv.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp>
+#include <iostream>
+#include <vector>
+#include "sparsestereo/exception.h"
+#include "sparsestereo/extendedfast.h"
+#include "sparsestereo/stereorectification.h"
+#include "sparsestereo/sparsestereo-inl.h"
+#include "sparsestereo/censuswindow.h"
+#include "sparsestereo/imageconversion.h"
+#include "sparsestereo/census-inl.h"
+
+using namespace std;
+using namespace cv;
+using namespace sparsestereo;
+using namespace boost;
+using namespace boost::posix_time;
+
+int main(int argc, char** argv) {
+	try {
+		// Stereo matching parameters
+		double uniqueness = 0.7;
+		int maxDisp = 70;
+		int leftRightStep = 2;
+
+		// Feature detection parameters
+		double adaptivity = 1.0;
+		int minThreshold = 10;
+
+		// Parse arguments
+		if(argc != 3 && argc != 4) {
+			cout << "Usage: " << argv[0] << " LEFT-IMG RIGHT-IMG [CALIBRATION-FILE]" << endl;
+			return 1;
+		}
+		char* leftFile = argv[1];
+		char* rightFile = argv[2];
+		char* calibFile = argc == 4 ? argv[3] : NULL;
+
+		// Read input images
+		cv::Mat_<unsigned char> leftImg, rightImg;
+		leftImg = imread(leftFile, CV_LOAD_IMAGE_GRAYSCALE);
+		rightImg = imread(rightFile, CV_LOAD_IMAGE_GRAYSCALE);
+
+		if(leftImg.data == NULL || rightImg.data == NULL)
+			throw sparsestereo::Exception("Unable to open input images!");
+
+		// Load rectification data
+		StereoRectification* rectification = NULL;
+		if(calibFile != NULL)
+			rectification = new StereoRectification(CalibrationResult(calibFile));
+
+		// The stereo matcher. SSE Optimized implementation is only available for a 5x5 window
+		SparseStereo<CensusWindow<5>, short> stereo(maxDisp, 1, uniqueness,
+			rectification, false, false, leftRightStep);
+
+		// Feature detectors for left and right image
+		FeatureDetector* leftFeatureDetector = new ExtendedFAST(true, minThreshold, adaptivity, false, 2);
+		FeatureDetector* rightFeatureDetector = new ExtendedFAST(false, minThreshold, adaptivity, false, 2);
+
+		ptime lastTime = microsec_clock::local_time();
+		vector<SparseMatch> correspondences;
+
+		// Objects for storing final and intermediate results
+		cv::Mat_<char> charLeft(leftImg.rows, leftImg.cols),
+			charRight(rightImg.rows, rightImg.cols);
+		Mat_<unsigned int> censusLeft(leftImg.rows, leftImg.cols),
+			censusRight(rightImg.rows, rightImg.cols);
+		vector<KeyPoint> keypointsLeft, keypointsRight;
+
+		// For performance evaluation we do the stereo matching 100 times
+		for(int i=0; i< 100; i++) {
+			// Feature detection. This part can be parallelized with OMP
+			#pragma omp parallel sections default(shared) num_threads(2)
+			{
+				#pragma omp section
+				{
+					ImageConversion::unsignedToSigned(leftImg, &charLeft);
+					Census::transform5x5(charLeft, &censusLeft);
+					keypointsLeft.clear();
+					leftFeatureDetector->detect(leftImg, keypointsLeft);
+				}
+				#pragma omp section
+				{
+					ImageConversion::unsignedToSigned(rightImg, &charRight);
+					Census::transform5x5(charRight, &censusRight);
+					keypointsRight.clear();
+					rightFeatureDetector->detect(rightImg, keypointsRight);
+				}
+			}
+
+			// Stereo matching. Not parallelized (overhead too large)
+			correspondences.clear();
+			stereo.match(censusLeft, censusRight, keypointsLeft, keypointsRight, &correspondences);
+		}
+
+		// Print statistics
+		time_duration elapsed = (microsec_clock::local_time() - lastTime);
+		cout << "Time for 100x stereo matching: " << elapsed.total_microseconds()/1.0e6 << "s" << endl
+			<< "Features detected in left image: " << keypointsLeft.size() << endl
+			<< "Features detected in right image: " << keypointsRight.size() << endl
+			<< "Percentage of matched features: " << (100.0 * correspondences.size() / keypointsLeft.size()) << "%" << endl;
+
+		// Highlight matches as colored boxes
+		Mat_<Vec3b> screen(leftImg.rows, leftImg.cols);
+		cvtColor(leftImg, screen, CV_GRAY2BGR);
+
+		for(int i=0; i<(int)correspondences.size(); i++) {
+			double scaledDisp = (double)correspondences[i].disparity() / maxDisp;
+			Vec3b color;
+			if(scaledDisp > 0.5)
+				color = Vec3b(0, (1 - scaledDisp)*512, 255);
+			else color = Vec3b(0, 255, scaledDisp*512);
+
+			rectangle(screen, correspondences[i].imgLeft->pt - Point2f(2,2),
+				correspondences[i].imgLeft->pt + Point2f(2, 2),
+				(Scalar) color, CV_FILLED);
+		}
+
+		// Display image and wait
+		namedWindow("Stereo");
+		imshow("Stereo", screen);
+		waitKey();
+
+		// Clean up
+		delete leftFeatureDetector;
+		delete rightFeatureDetector;
+		if(rectification != NULL)
+			delete rectification;
+
+		return 0;
+	}
+	catch (const std::exception& e) {
+		cerr << "Fatal exception: " << e.what();
+		return 1;
+	}
+}
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/CMakeLists.txt b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/CMakeLists.txt
new file mode 100644
index 00000000..e57cf787
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/CMakeLists.txt
@@ -0,0 +1,26 @@
+add_library(sparsestereo
+	calibrationresult.h
+	calibrationresult.cpp
+	censuswindow.h
+	exception.h
+	extendedfast.cpp
+	extendedfast.h
+	fast9.h
+	fast9-inl.h
+	hammingdistance.cpp
+	hammingdistance.h
+	simd.h
+	simd.cpp
+	sparsematch.h
+	sparserectification.cpp
+	sparserectification.h
+	sparsestereo.h
+	sparsestereo-inl.h
+	stereorectification.cpp
+	stereorectification.h
+	imageconversion.h
+	imageconversion.cpp
+	census.h
+	census-inl.h
+	census.cpp
+)
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/calibrationresult.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/calibrationresult.cpp
new file mode 100644
index 00000000..8f64530e
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/calibrationresult.cpp
@@ -0,0 +1,55 @@
+/*
+ * Author: Konstantin Schauwecker
+ * Year: 2012
+ */
+
+#include "calibrationresult.h"
+#include <opencv2/core/core.hpp>
+#include "exception.h"
+
+namespace sparsestereo {
+	using namespace cv;
+	using namespace std;
+
+	CalibrationResult::CalibrationResult(const char* file) {
+		FileStorage fs(file, CV_STORAGE_READ);
+		if(!fs.isOpened())
+			throw Exception("Unable to read calibration results");
+
+		fs["M1"] >> cameraMatrix[0];
+		fs["D1"] >> distCoeffs[0];
+		fs["M2"] >> cameraMatrix[1];
+		fs["D2"] >> distCoeffs[1];
+		fs["R1"] >> R[0];
+		fs["R2"] >> R[1];
+		fs["P1"] >> P[0];
+		fs["P2"] >> P[1];
+		fs["Q"] >> Q;
+		fs["T"] >> T;
+
+		Mat_<int> sz(2, 1);
+		fs["size"] >> sz;
+		imageSize.width = sz(0, 0);
+		imageSize.height = sz(1, 0);
+
+		fs.release();
+	}
+
+	void CalibrationResult::writeToFile(const char* file) const {
+		FileStorage fs(file, CV_STORAGE_WRITE);
+		if(!fs.isOpened())
+			throw Exception("Unable to store calibration results");
+
+		fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0]
+			<< "M2" << cameraMatrix[1] << "D2" << distCoeffs[1]
+			<< "R1" << R[0] << "R2" << R[1] << "P1" << P[0] << "P2" << P[1] << "Q" << Q
+			<< "T" << T;
+
+		Mat_<int> sz(2, 1);
+		sz(0, 0) = imageSize.width;
+		sz(1, 0) = imageSize.height;
+		fs << "size" << sz;
+
+		fs.release();
+	}
+}
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/calibrationresult.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/calibrationresult.h
new file mode 100644
index 00000000..872aa787
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/calibrationresult.h
@@ -0,0 +1,28 @@
+/*
+ * Author: Konstantin Schauwecker
+ * Year: 2012
+ */
+
+#ifndef SPARSESTEREO_CALIBRATIONRESULT_H
+#define SPARSESTEREO_CALIBRATIONRESULT_H
+
+#include <opencv2/core/core.hpp>
+
+namespace sparsestereo {
+	// Stores the results of a stereo camera calibration
+	struct CalibrationResult {
+		cv::Mat_<double> cameraMatrix[2], distCoeffs[2];
+		cv::Mat_<double> R[2], P[2], Q, T;
+		cv::Size imageSize;
+
+		CalibrationResult() {
+			cameraMatrix[0] = cv::Mat_<double>::eye(3, 3);
+			cameraMatrix[1] = cv::Mat_<double>::eye(3, 3);
+		}
+		CalibrationResult(const char* file);
+
+		void writeToFile(const char* file) const;
+	};
+}
+
+#endif
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/census-inl.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/census-inl.h
new file mode 100644
index 00000000..d22e3839
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/census-inl.h
@@ -0,0 +1,59 @@
+/*
+ * Author: Konstantin Schauwecker
+ * Year: 2012
+ */
+
+#ifndef SPARSESTEREO_CENSUS_INL_H
+#define SPARSESTEREO_CENSUS_INL_H
+
+#include "exception.h"
+#include "census.h"
+
+namespace sparsestereo {
+	template <class T>
+	void Census::transform5x5(const cv::Mat_<T>& input, cv::Mat_<unsigned int>* output) {
+		int maxX=input.cols-2, maxY=input.rows-2;
+
+		for(int y=2; y<maxY; y++)
+			for(int x=2; x<maxX; x++) {
+				T centoid = input(y, x);
+				(*output)(y, x) =
+					// Row 1
+					((centoid > input(y-2, x-2)) << 24) |
+					((centoid > input(y-2, x-1)) << 23) |
+					((centoid > input(y-2, x)) << 22) |
+					((centoid > input(y-2, x+1)) << 21) |
+					((centoid > input(y-2, x+2)) << 20) |
+					// Row 2
+					((centoid > input(y-1, x-2)) << 19) |
+					((centoid > input(y-1, x-1)) << 18) |
+					((centoid > input(y-1, x)) << 17) |
+					((centoid > input(y-1, x+1)) << 16) |
+					((centoid > input(y-1, x+2)) << 15) |
+					// Row 3
+					((centoid > input(y, x-2)) << 14) |
+					((centoid > input(y, x-1)) << 13) |
+					((centoid > input(y, x)) << 12) |
+					((centoid > input(y, x+1)) << 11) |
+					((centoid > input(y, x+2)) << 10) |
+					// Row 4
+					((centoid > input(y+1, x-2)) << 9) |
+					((centoid > input(y+1, x-1)) << 8) |
+					((centoid > input(y+1, x)) << 7) |
+					((centoid > input(y+1, x+1)) << 6) |
+					((centoid > input(y+1, x+2)) << 5) |
+					// Row 5
+					((centoid > input(y+2, x-2)) << 4) |
+					((centoid > input(y+2, x-1)) << 3) |
+					((centoid > input(y+2, x)) << 2) |
+					((centoid > input(y+2, x+1)) << 1) |
+					(centoid > input(y+2, x+2));
+			}
+	}
+
+	template <>
+	void Census::transform5x5(const cv::Mat_<unsigned char>& input, cv::Mat_<unsigned int>* output);
+}
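+
+// Worked example of the signature computed above (hypothetical pixel values):
+// if the window center is 100 and only the five row-1 neighbours are darker,
+// say 90, the five (centoid > neighbour) comparisons set bits 24..20, giving
+// 0x01F00000. The bit-12 comparison of the center with itself is always 0,
+// so the 5x5 window yields an effective 24-bit descriptor.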
+
+#endif
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/census.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/census.cpp
new file mode 100644
index 00000000..0bbab689
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/census.cpp
@@ -0,0 +1,109 @@
+/*
+ * Author: Konstantin Schauwecker
+ * Year: 2012
+ */
+
+#include "census-inl.h"
+
+namespace sparsestereo {
+	using namespace std;
+	using namespace cv;
+
+	template <>
+	void Census::transform5x5(const Mat_<unsigned char>& input, Mat_<unsigned int>* output) {
+		transform5x5SSE(input, output);
+	}
+
+	void Census::transform5x5SSE(const Mat_<unsigned char>& input, Mat_<unsigned int>* output) {
+		// Predeclare required constants
+		const v16qi const01 = SIMD::scalar16(0x01);
+
+		// We skip one row at the beginning and end to avoid range checking
+		for(int y=2; y
+#include "simd.h"
+
+namespace sparsestereo {
+	// Computes various variants of the census transform
+	class Census {
+	public:
+		// Census transform using 5x5 window
+		template <class T>
+		static void transform5x5(const cv::Mat_<T>& input, cv::Mat_<unsigned int>* output);
+
+	private:
+		// SSE optimized implementations
+		static void transform5x5SSE(const cv::Mat_<unsigned char>& input, cv::Mat_<unsigned int>* output);
+
+		// Efficiently stores 4 byte blocks by interleaving four 1-byte vectors
+		static __always_inline void storeSSEVec(const v16qi& byte4, const v16qi& byte3, const v16qi& byte2,
+			const v16qi& byte1, char* dst);
+	};
+}
+
+#endif
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/censuswindow.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/censuswindow.h
new file mode 100644
index 00000000..9ce555c0
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/censuswindow.h
@@ -0,0 +1,237 @@
+/*
+ * Author: Konstantin Schauwecker
+ * Year: 2012
+ */
+
+#ifndef SPARSESTEREO_CENSUSWINDOW_H
+#define SPARSESTEREO_CENSUSWINDOW_H
+
+#include <opencv2/core/core.hpp>
+#include "hammingdistance.h"
+#include "simd.h"
+
+//#define SPARSESTEREO_NO_POPCNT //Defined by CMake
+
+#if defined(SPARSESTEREO_NO_POPCNT)
+#define SPARSESTEREO_SSE_OPTIMIZE
+#endif
+
+namespace sparsestereo {
+	// Matches over a set of census windows
+	template <int SIZE>
+	class CensusWindow {
+	public:
+		CensusWindow() {
+#ifdef SPARSESTEREO_SSE_OPTIMIZE
+			lookupTablePtr = hammingDist.getLookupTable();
+			lookupTableVec = SIMD::scalar4((int)hammingDist.getLookupTable());
+			zero = SIMD::scalar16(0);
+#endif
+		}
+
+		void setReferenceImage(const cv::Mat_<unsigned int>& image) {
+			refImage = image;
+		}
+
+		void setComparisonImage(const cv::Mat_<unsigned int>& image) {
+			compImage = image;
+		}
+
+		const cv::Mat_<unsigned int>& getReferenceImage() const {return refImage;}
+		const cv::Mat_<unsigned int>& getComparisonImage() const {return compImage;}
+		const int getWindowSize() const {return SIZE;}
+
+		// Sets the position of the reference window
+		void setReferencePoint(const cv::Point2i& point);
+
+		// Performs a window matching using census transformed images
+		__always_inline short match(cv::Point2i point) const;
+
+	private:
+		v4si lookupTableVec;
+		v4si refWindow[6];
+		v16qi zero;
+
+		const unsigned char* lookupTablePtr;
+		HammingDistance hammingDist;
+		cv::Mat_<unsigned int> refImage;
+		cv::Mat_<unsigned int> compImage;
+		cv::Point2i refPoint;
+
+		// Stores a window in two SSE vectors
+		void loadWindow(const cv::Mat_<unsigned int>& image, const cv::Point2i& point, v4si* dstWindow) const;
+	} __attribute__ ((aligned (16)));
+
+	template <int SIZE>
+	void CensusWindow<SIZE>::loadWindow(const cv::Mat_<unsigned int>& image, const cv::Point2i& point, v4si* dstWindow) const {}
+
+#ifdef SPARSESTEREO_SSE_OPTIMIZE
+	template <>
+	void CensusWindow<5>::loadWindow(const cv::Mat_<unsigned int>& image, const cv::Point2i& point, v4si* dstWindow) const {
+		dstWindow[0] = (v4si)__builtin_ia32_loaddqu((char*)&image(point.y-2, point.x-2));
+		dstWindow[1] = (v4si)__builtin_ia32_loaddqu((char*)&image(point.y-1, point.x-2));
+		dstWindow[2] = (v4si)__builtin_ia32_loaddqu((char*)&image(point.y, point.x-2));
+		dstWindow[3] = (v4si)__builtin_ia32_loaddqu((char*)&image(point.y+1, point.x-2));
+		dstWindow[4] = (v4si)__builtin_ia32_loaddqu((char*)&image(point.y+2, point.x-2));
+
+		// Unfortunately, the rest cannot be loaded aligned
+		v4si buffer = {image(point.y-2, point.x+2), image(point.y-1, point.x+2), image(point.y, point.x+2), image(point.y+1, point.x+2)};
+		dstWindow[5] = buffer;
+	}
+#endif
+
+	template <int SIZE>
+	void CensusWindow<SIZE>::setReferencePoint(const cv::Point2i& point) {
+		refPoint = point;
+	}
+
+#ifdef SPARSESTEREO_SSE_OPTIMIZE
+	template <>
+	void CensusWindow<5>::setReferencePoint(const cv::Point2i& point) {
+		loadWindow(refImage, point, refWindow);
+	}
+#endif
+
+	template <int SIZE>
+	__always_inline short CensusWindow<SIZE>::match(cv::Point2i point) const {
+		int costs = 0;
+
+#ifndef SPARSESTEREO_NO_POPCNT
+		for(int y=-SIZE/2; y<=SIZE/2; y++) {
+			unsigned long long* ptr1 = (unsigned long long*)&refImage(refPoint.y + y, refPoint.x -SIZE/2);
+			unsigned long long* ptr2 = (unsigned long long*)&compImage(point.y + y, point.x -SIZE/2);
+
+			for(int x=0; x<=SIZE/2;x++)
+				costs += __builtin_popcountll(ptr1[x] ^ ptr2[x]);
+		}
+#else
+		for(int y=-SIZE/2; y<=SIZE/2; y++)
+			for(int x=-SIZE/2; x<=SIZE/2; x++)
+				costs += hammingDist.calculate(refImage(refPoint.y + y, refPoint.x + x),
+					compImage(point.y + y, point.x + x));
+#endif
+
+		return costs;
+	}
+
+#ifdef SPARSESTEREO_SSE_OPTIMIZE
+#ifdef __LP64__
+	// SSE2 optimized implementation for 64-bit systems.
+ template <> + __always_inline short CensusWindow<5>::match(cv::Point2i point) const { + v8hi xorRes; + unsigned int sum; + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y-2, point.x-2)), (v2di)refWindow[0]); + sum = lookupTableVec[(unsigned short)SIMD::element8(xorRes, 0)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 1)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 2)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 3)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 4)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 5)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 6)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 7)]; + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y-1, point.x-2)), (v2di)refWindow[1]); + sum += lookupTableVec[(unsigned short)SIMD::element8(xorRes, 0)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 1)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 2)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 3)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 4)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 5)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 6)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 7)]; + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y, point.x-2)), (v2di)refWindow[2]); + sum += lookupTableVec[(unsigned short)SIMD::element8(xorRes, 0)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 1)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 2)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 3)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 4)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 5)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 6)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 7)]; + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y+1, point.x-2)), (v2di)refWindow[3]); + sum += lookupTableVec[(unsigned short)SIMD::element8(xorRes, 0)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 1)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 2)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 3)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 4)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 5)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 6)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 7)]; + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y+2, point.x-2)), (v2di)refWindow[4]); + sum += lookupTableVec[(unsigned short)SIMD::element8(xorRes, 0)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 1)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 2)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 3)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 4)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 5)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 6)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 7)]; + + v4si buffer1 = {compImage(point.y-2, point.x+2), compImage(point.y-1, point.x+2), compImage(point.y, point.x+2), compImage(point.y+1, point.x+2)}; + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)buffer1, (v2di)refWindow[5]); + sum += lookupTableVec[(unsigned short)SIMD::element8(xorRes, 0)] 
+ lookupTableVec[(unsigned short)SIMD::element8(xorRes, 1)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 2)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 3)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 4)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 5)] + + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 6)] + lookupTableVec[(unsigned short)SIMD::element8(xorRes, 7)]; + + unsigned short lastXor = compImage(point.y+2, point.x+2) ^ refImage(refPoint.y + 2, refPoint.x+2); + sum += lookupTablePtr[(unsigned short)lastXor] + lookupTablePtr[((unsigned short*)&lastXor)[1]]; + + return sum; + } + +#else + + // SSE2 optimized implementation for 32-bit systems. + template <> + __always_inline short CensusWindow<5>::match(cv::Point2i point) const { + + v8hi xorRes; + v4si lookupPtr; + unsigned int sum; + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y-2, point.x-2)), (v2di)refWindow[0]); + lookupPtr = (v4si)__builtin_ia32_punpcklwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum = *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + lookupPtr = (v4si)__builtin_ia32_punpckhwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y-1, point.x-2)), (v2di)refWindow[1]); + lookupPtr = (v4si)__builtin_ia32_punpcklwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + lookupPtr = (v4si)__builtin_ia32_punpckhwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y, point.x-2)), (v2di)refWindow[2]); + lookupPtr = (v4si)__builtin_ia32_punpcklwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + lookupPtr = (v4si)__builtin_ia32_punpckhwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y+1, point.x-2)), (v2di)refWindow[3]); + lookupPtr = (v4si)__builtin_ia32_punpcklwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + lookupPtr = (v4si)__builtin_ia32_punpckhwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum 
+= *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)__builtin_ia32_loaddqu((char*)&compImage(point.y+2, point.x-2)), (v2di)refWindow[4]); + lookupPtr = (v4si)__builtin_ia32_punpcklwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + lookupPtr = (v4si)__builtin_ia32_punpckhwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + + v4si buffer1 = {compImage(point.y-2, point.x+2), compImage(point.y-1, point.x+2), compImage(point.y, point.x+2), compImage(point.y+1, point.x+2)}; + xorRes = (v8hi)__builtin_ia32_pxor128((v2di)buffer1, (v2di)refWindow[5]); + lookupPtr = (v4si)__builtin_ia32_punpcklwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + lookupPtr = (v4si)__builtin_ia32_punpckhwd128(xorRes, (v8hi)zero) + lookupTableVec; + sum += *((unsigned char*)SIMD::element4(lookupPtr, 0)) + *((unsigned char*)SIMD::element4(lookupPtr, 1)) + + *((unsigned char*)SIMD::element4(lookupPtr, 2)) + *((unsigned char*)SIMD::element4(lookupPtr, 3)); + + unsigned short lastXor = compImage(point.y+2, point.x+2) ^ refImage(refPoint.y + 2, refPoint.x+2); + sum += lookupTablePtr[(unsigned short)lastXor] + lookupTablePtr[((unsigned short*)&lastXor)[1]]; + + return sum; + } +#endif +#endif +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/exception.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/exception.h new file mode 100644 index 00000000..89330d9f --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/exception.h @@ -0,0 +1,29 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_EXCEPTION_H +#define SPARSESTEREO_EXCEPTION_H + +#include +#include + +namespace sparsestereo { + // Base class for all exceptions + class Exception: public std::exception { + public: + Exception(const char* msg) + : message(msg) {} + Exception(std::string msg) + : message(msg) {} + ~Exception() throw() {} + + virtual const char* what() const throw() + {return message.c_str();} + + private: + std::string message; + }; +} +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast-inl.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast-inl.h new file mode 100644 index 00000000..db816ee1 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast-inl.h @@ -0,0 +1,47 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include "extendedfast.h" + +namespace sparsestereo { + + template + __always_inline v16qi ExtendedFAST::loadCircleSSE(const cv::Mat_& input, int x, int y) { + const char* center = (const char*)&(input(y, x)); + + v16qi circle = {*(center + offsets[0]), *(center + offsets[1]), *(center + offsets[2]), *(center + offsets[3]), + 
*(center + offsets[4]), *(center + offsets[5]), *(center + offsets[6]), *(center + offsets[7]), + *(center + offsets[8]), *(center + offsets[9]), *(center + offsets[10]), *(center + offsets[11]), + *(center + offsets[12]), *(center + offsets[13]), *(center + offsets[14]), *(center + offsets[15]) + }; + + return circle; + } + + // Tests for a feature and optionally stores the score + __always_inline bool ExtendedFAST::testFeatureAndScore(const cv::Mat_& input, const cv::Point2i pt, bool storeScore) { + // Calculate the adaptive threshold by computing an RMS contrast + // like measure based on absolute values + v16qi uCircle = loadCircleSSE(input, (int)pt.x, (int)pt.y); + v16qi circle = uCircle - const128; + v2di partialSum = __builtin_ia32_psadbw128(uCircle, const0); + int sum = SIMD::element2(partialSum, 0) + SIMD::element2(partialSum, 1); + v16qi avg = SIMD::scalar16(sum/16); + + v2di partialSAD = __builtin_ia32_psadbw128(uCircle, avg); + int sad = SIMD::element2(partialSAD, 0) + SIMD::element2(partialSAD, 1); + unsigned char adaptiveThreshold = cv::saturate_cast(sad * adaptivity / 16); + v16qi adaptiveThresholdVec = SIMD::scalar16(adaptiveThreshold); + + // Perform corner test + v16qi center = SIMD::scalar16(SPARSESTEREO_EXFAST_CENTRAL_VALUE(input, (int)pt.y, (int)pt.x) - 128); + if(detectSingleCornerSSE(circle, center, adaptiveThresholdVec, 9)) { + if(storeScore) // We have to calculate a corner score + scores.push_back(calcSingleScoreSSE(circle, center, adaptiveThresholdVec, 9) - adaptiveThreshold); + return true; + } + else return false; + } +} diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast.cpp new file mode 100644 index 00000000..3378a76a --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast.cpp @@ -0,0 +1,232 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include +#include +#include "extendedfast.h" +#include "fast9-inl.h" +#include "exception.h" + +namespace sparsestereo { + using namespace cv; + using namespace std; + + const v16qi ExtendedFAST::const0(SIMD::scalar16NonLookup(0)); + const v16qi ExtendedFAST::const128(SIMD::scalar16NonLookup(128)); + const v16qi ExtendedFAST::const255(SIMD::scalar16NonLookup(255)); + + // Longest arc length lookup table + unsigned char ExtendedFAST::lookupTable[1U<<16]; + bool ExtendedFAST::lookupInitialized = false; + + ExtendedFAST::ExtendedFAST(bool nonmaxSuppression, unsigned char minThreshold, float adaptivity, bool subpixelPrecision, int minBorder) + : nonmaxSuppression(nonmaxSuppression), minThreshold(minThreshold), adaptivity(adaptivity), subpixelPrecision(subpixelPrecision) { + const int reserve = 512; + cornersMin.reserve(reserve); cornersAdapt.reserve(reserve); scores.reserve(reserve); + + if(!lookupInitialized) { + // The first instance initializes the lookup table. + // Should be threadsafe as we always write the same values and set the + // initialization flag last. + for(int i=0; i<=0xFFFF; i++) + lookupTable[i] = findLongestArc(i) >= 9 ? 
0xFF : 0;
+            lookupInitialized = true;
+        }
+    }
+
+    ExtendedFAST::~ExtendedFAST() {
+    }
+
+    inline unsigned char ExtendedFAST::findLongestArc(unsigned short stripe) {
+        int bestLength = 0;
+        int startLength = -1;
+        int currentLength = 0;
+
+        // Iterate over all 16 bit positions of the comparison pattern
+        for(int i=1; i<0xFFFF; i = i<<1) {
+            if(stripe & i)
+                // Inside an arc segment
+                currentLength++;
+            else {
+                // Outside an arc segment
+                if(currentLength > bestLength)
+                    bestLength = currentLength;
+                if(startLength == -1)
+                    startLength = currentLength;
+                currentLength = 0;
+            }
+        }
+
+        // Wrap-around case: join the final segment with the one that started at bit 0
+        if(startLength != -1)
+            currentLength += startLength;
+
+        // Handle last arc segment
+        if(currentLength > bestLength)
+            bestLength = currentLength;
+
+        return bestLength;
+    }
+
+    void ExtendedFAST::initOffsets(int step) {
+        offsets[0] = step*-3 -1;
+        offsets[1] = step*-3;
+        offsets[2] = step*-3 +1;
+        offsets[3] = step*-2 +2;
+        offsets[4] = step*-1 +3;
+        offsets[5] = +3;
+        offsets[6] = step +3;
+        offsets[7] = step*+2 +2;
+        offsets[8] = step*+3 +1;
+        offsets[9] = step*+3;
+        offsets[10] = step*+3 -1;
+        offsets[11] = step*+2 -2;
+        offsets[12] = step*+1 -3; //Aligned
+        offsets[13] = -3; //Aligned
+        offsets[14] = step*-1 -3; //Aligned
+        offsets[15] = step*-2 -2;
+    }
+
+    void ExtendedFAST::detectImpl(const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) {
+
+        if(mask.data != NULL)
+            throw Exception("Feature detection masks not supported!");
+        else if(image.type() != CV_8U)
+            throw Exception("Image data has to be of type unsigned char!");
+
+        // Create offsets for the circle pixels
+        initOffsets(image.step);
+
+        FAST(image, cornersMin, minThreshold, false);
+        adaptiveThresholdAndScore(image);
+
+        if(nonmaxSuppression)
+        {
+            // Perform nonmax suppression
+            vector<int> nonmaxPoints;
+            fast9.nonMaxSuppression(cornersAdapt, scores, nonmaxPoints);
+
+            // Copy and optionally refine result
+            keypoints.reserve(nonmaxPoints.size());
+            for(unsigned int i=0; i& input) {
+        initOffsets((int)input.step);
+        for(unsigned int i=0; i& input, int x, int y) {
+        const char* center = (const char*)&(input(y, x));
+
+        v16qi circle = {*(center + offsets[0]), *(center + offsets[1]), *(center + offsets[2]), *(center + offsets[3]),
+            *(center + offsets[4]), *(center + offsets[5]), *(center + offsets[6]), *(center + offsets[7]),
+            *(center + offsets[8]), *(center + offsets[9]), *(center + offsets[10]), *(center + offsets[11]),
+            *(center + offsets[12]), *(center + offsets[13]), *(center + offsets[14]), *(center + offsets[15])
+        };
+
+        return circle;
+    }
+
+    __always_inline bool ExtendedFAST::testFeatureAndScore(const cv::Mat_<unsigned char>& input, const cv::Point2i pt, bool storeScore) {
+        // Calculate the adaptive threshold by computing an RMS-contrast-like
+        // measure based on absolute values
+        v16qi uCircle = loadCircleSSE(input, (int)pt.x, (int)pt.y);
+        v16qi circle = uCircle - const128;
+        v2di partialSum = __builtin_ia32_psadbw128(uCircle, const0);
+        int sum = SIMD::element2(partialSum, 0) + SIMD::element2(partialSum, 1);
+        v16qi avg = SIMD::scalar16(sum/16);
+
+        v2di partialSAD = __builtin_ia32_psadbw128(uCircle, avg);
+        int sad = SIMD::element2(partialSAD, 0) + SIMD::element2(partialSAD, 1);
+        unsigned char adaptiveThreshold = cv::saturate_cast<unsigned char>(sad * adaptivity / 16);
+        v16qi adaptiveThresholdVec = SIMD::scalar16(adaptiveThreshold);
+
+        // Perform corner test
+        v16qi center = SIMD::scalar16(SPARSESTEREO_EXFAST_CENTRAL_VALUE(input, (int)pt.y, (int)pt.x) - 128);
+        if(detectSingleCornerSSE(circle, center, adaptiveThresholdVec, 9)) {
+            if(storeScore) //
We have to calculate a corner score + scores.push_back(calcSingleScoreSSE(circle, center, adaptiveThresholdVec, 9) - adaptiveThreshold); + return true; + } + else return false; + } + + __always_inline bool ExtendedFAST::detectSingleCornerSSE(const v16qi& circle, const v16qi& center, const v16qi& threshold, + unsigned char minLength) { + + // Find longest brighter arc + v16qi centersPlus = __builtin_ia32_paddsb128(center, threshold); + int arcBrighter = __builtin_ia32_pmovmskb128(__builtin_ia32_pcmpgtb128(circle, centersPlus)); + + if(lookupTable[arcBrighter] >= minLength) + return true; + else { + // Find longest darker arc + v16qi centersMinus = __builtin_ia32_psubsb128(center, threshold); + int arcDarker = __builtin_ia32_pmovmskb128(__builtin_ia32_pcmpgtb128(centersMinus, circle)); + if(lookupTable[arcDarker] >= minLength) + return true; + } + + return false; + } + + __always_inline unsigned char ExtendedFAST::calcSingleScoreSSE(const v16qi& circle, v16qi center, const v16qi& bstartVec, + unsigned char minLength) { + + v16qi bmin = bstartVec; + v16qi bmax = const255; + v16qi b = __builtin_ia32_pavgb128 (bmax, bmin); + center += const128; + + //Compute the score using binary search + for(;;) + { + // Find brighter arc + v16qi centerPlus = __builtin_ia32_paddusb128(center, b) - const128; + int arcBrighter = __builtin_ia32_pmovmskb128(__builtin_ia32_pcmpgtb128(circle, centerPlus)); + + if(lookupTable[arcBrighter] >= minLength) + bmin = b; // corner + else { + // Find darker arc + v16qi centerMinus = __builtin_ia32_psubusb128(center, b) - const128; + int arcDarker = __builtin_ia32_pmovmskb128(__builtin_ia32_pcmpgtb128(centerMinus, circle)); + if(lookupTable[arcDarker] >= minLength) + bmin = b; // corner + else + bmax = b; // Not a corner + } + + unsigned char singleBMin = SIMD::element16(bmin, 0), singleBMax = SIMD::element16(bmax, 0); + if(singleBMin == singleBMax || singleBMin == singleBMax - 1) + return (unsigned char)SIMD::element16(bmin, 0); + + // Update threshold + b = __builtin_ia32_pavgb128 (bmax, bmin); + } + } +} diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast.h new file mode 100644 index 00000000..7136ab51 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/extendedfast.h @@ -0,0 +1,77 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_EXTENDEDFAST_H +#define SPARSESTEREO_EXTENDEDFAST_H + +#include +#include +#include +#include "simd.h" +#include "fast9.h" + +namespace sparsestereo { + +//#define SPARSESTEREO_EXFAST_CENTRAL_VALUE(img, y, x) img(y, x) +#define SPARSESTEREO_EXFAST_CENTRAL_VALUE(img, y, x) (((int)img(y, x) + (int)img(y-1, x) + (int)img(y+1, x) + (int)img(y, x-1) + (int)img(y, x+1))/5) + + // Extended version of the fast feature detector + class ExtendedFAST: public cv::FeatureDetector { + public: + ExtendedFAST(bool nonmaxSuppression, unsigned char minThreshold, float adaptivity, bool subpixelPrecision, int minBorder = 0); + virtual ~ExtendedFAST(); + + protected: + // Implements the feature detector interface + virtual void detectImpl(const cv::Mat& image, std::vector& keypoints, const cv::Mat& mask=cv::Mat()) const { + // OpenCV tries to force this method to be const, but we don't like that! 
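+        // Typical use through the inherited cv::FeatureDetector interface
+        // (sketch only; 'image' and the parameter values are illustrative,
+        // not taken from this patch):
+        //   ExtendedFAST detector(true, 20, 1.0f, false);
+        //   std::vector<cv::KeyPoint> keypoints;
+        //   detector.detect(image, keypoints); // detect() dispatches to detectImpl()
+        //
+        // cv::FeatureDetector declares detectImpl() const, but this detector
+        // buffers per-image state (cornersMin, cornersAdapt, scores) in member
+        // variables, so the const override simply casts the const away and
+        // forwards to the non-const overload declared below.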
+        const_cast<ExtendedFAST*>(this)->detectImpl(image, keypoints, mask);
+    }
+    void detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask=cv::Mat());
+
+    private:
+        // Constants for SSE processing
+        static const v16qi const0, const128, const255;
+
+        bool nonmaxSuppression;
+        // Adaptive threshold parameters
+        unsigned char minThreshold;
+        unsigned char maxThreshold;
+        float adaptivity;
+        bool subpixelPrecision;
+        int border;
+        FAST9 fast9;
+
+        // Feature point data
+        std::vector cornersAdapt;
+        std::vector cornersMin;
+        std::vector scores;
+
+        // Offsets for the circle pixels
+        int offsets[16];
+        // Lookup table for longest arc lengths
+        static unsigned char lookupTable[1U<<16];
+        static bool lookupInitialized;
+
+        // Finds the longest arc for a given comparison pattern
+        inline unsigned char findLongestArc(unsigned short stripe);
+        // Initializes offsets for corner detection
+        void initOffsets(int step);
+        // Loads 16 circle pixels into one SSE vector
+        __always_inline v16qi loadCircleSSE(const cv::Mat_<unsigned char>& input, int x, int y);
+        // Performs feature test and score calculation
+        __always_inline bool testFeatureAndScore(const cv::Mat_<unsigned char>& input, const cv::Point2i pt, bool storeScore);
+        // Performs a corner detection using the circle pixels from an SSE vector
+        __always_inline bool detectSingleCornerSSE(const v16qi& circle, const v16qi& center, const v16qi& threshold,
+            unsigned char minLength);
+        // Performs a score calculation using the circle pixels from an SSE vector
+        __always_inline unsigned char calcSingleScoreSSE(const v16qi& circle, v16qi center, const v16qi& bstartVec,
+            unsigned char minLength);
+        // Applies the adaptive threshold to cornersMin, and calculates the score
+        void adaptiveThresholdAndScore(const cv::Mat_<unsigned char>& input);
+    };
+}
+
+#endif
diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/fast9-inl.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/fast9-inl.h
new file mode 100644
index 00000000..ada45d04
--- /dev/null
+++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/fast9-inl.h
@@ -0,0 +1,6016 @@
+// This code is based on the original FAST corner detector by Edward Rosten.
+// Below is the original copyright and references
+
+/*
+Copyright (c) 2006, 2008 Edward Rosten
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+    *Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+    *Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+
+    *Neither the name of the University of Cambridge nor the names of
+     its contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+The references are:
+ * Machine learning for high-speed corner detection,
+   E. Rosten and T. Drummond, ECCV 2006
+ * Faster and better: A machine learning approach to corner detection
+   E. Rosten, R. Porter and T. Drummond, PAMI, 2009
+*/
+
+#ifndef SPARSESTEREO_FAST9_INL_H
+#define SPARSESTEREO_FAST9_INL_H
+
+#include "fast9.h"
+
+namespace sparsestereo {
+    template <class T>
+    const int FAST9<T>::pixelOffsetsX[16] = {0, 1, 2, 3, 3, 3, 2, 1, 0, -1, -2, -3, -3, -3, -2, -1};
+
+    template <class T>
+    const int FAST9<T>::pixelOffsetsY[16] = {3, 3, 2, 1, 0, -1, -2, -3, -3, -3, -2, -1, 0, 1, 2, 3};
+
+    template <class T>
+    FAST9<T>::FAST9():step(-1) {
+        setStep(640);
+    }
+
+    template <class T>
+    void FAST9<T>::setStep(int s) {
+        if(s == step)
+            return; // nothing changed
+
+        step = s;
+        for(int i=0; i<16; i++)
+            pixel[i] = pixelOffsetsX[i] + step * pixelOffsetsY[i];
+    }
+
+    /* This defines non-strict maxima */
+#define SPARSESTEREO_NONMAX_COMPARE(X, Y) ((X)>=(Y))
+
+    /* This is a fast, integer only, sparse nonmaximal suppression. */
+    /* Probably only useful for FAST corner detection. */
+    template <class T> template <typename T2>
+    void FAST9<T>::nonMaxSuppression(const std::vector<cv::Point2i>& corners, const std::vector<T2>& scores, std::vector<int>& ret_nonmax) const {
+        int i, j, num_corners = (int)corners.size();
+
+        // point_above points (roughly) to the pixel above the one of interest, if there is a feature there.
+        int point_above = 0;
+        int point_below = 0;
+
+        ret_nonmax.clear();
+        if(num_corners < 1)
+            return;
+
+        /* Find where each row begins
+           (the corners are output in raster scan order). A beginning of -1 signifies
+           that there are no corners on that row. */
+        int last_row = corners[num_corners-1].y;
+        std::vector<int> row_start(last_row+1);
+
+        for(i=0; i < last_row+1; i++)
+            row_start[i] = -1;
+
+        int prev_row = -1;
+        for(i=0; i < num_corners; i++)
+            if(corners[i].y != prev_row)
+            {
+                row_start[corners[i].y] = i;
+                prev_row = corners[i].y;
+            }
+
+        ret_nonmax.reserve(num_corners);
+        for(i = 0; i < num_corners; i++) {
+            T2 score = scores[i];
+            cv::Point pos = corners[i];
+
+            // Check left
+            if(i > 0 && corners[i-1].x == pos.x-1 && corners[i-1].y == pos.y) {
+                if(SPARSESTEREO_NONMAX_COMPARE(scores[i-1], score))
+                    continue;
+            }
+
+            // Check right
+            if(i < num_corners-1 && corners[i+1].x == pos.x+1 && corners[i+1].y == pos.y) {
+                if(SPARSESTEREO_NONMAX_COMPARE(scores[i+1], score))
+                    continue;
+            }
+
+            bool suppressed = false;
+            // Check above (if there is a valid row above)
+            if(pos.y != 0 && row_start[pos.y - 1] != -1) {
+                // Make sure that the current point_above is one row above.
+                if(corners[point_above].y < pos.y - 1)
+                    point_above = row_start[pos.y-1];
+
+                // Make point_above point to the first of the pixels above the current point, if it exists.
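+                // point_above and point_below only ever move forward through the
+                // raster-scan ordered corner list, so the whole suppression pass
+                // stays linear in the number of corners.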
+ for(; corners[point_above].y < pos.y && corners[point_above].x < pos.x - 1; point_above++) + ; + + for(j=point_above; corners[j].y < pos.y && corners[j].x <= pos.x + 1; j++) { + int x = corners[j].x; + if( (x == pos.x - 1 || x ==pos.x || x == pos.x+1)) { + if(SPARSESTEREO_NONMAX_COMPARE(scores[j], score)) { + suppressed = true; + break; + } + } + } + if( suppressed ) + continue; + } + + // Check below (if there is anything below) + if(pos.y != last_row && row_start[pos.y + 1] != -1 && point_below < num_corners) { // Nothing below + if(corners[point_below].y < pos.y + 1) + point_below = row_start[pos.y+1]; + + // Make point below point to one of the pixels belowthe current point, if it exists. + for(; point_below < num_corners && corners[point_below].y == pos.y+1 + && corners[point_below].x < pos.x - 1; point_below++) + ; + + for(j=point_below; j < num_corners && corners[j].y == pos.y+1 && corners[j].x <= pos.x + 1; j++) { + int x = corners[j].x; + if( (x == pos.x - 1 || x ==pos.x || x == pos.x+1)) { + if(SPARSESTEREO_NONMAX_COMPARE(scores[j],score)) { + suppressed = true; + break; + } + } + } + if( suppressed ) + continue; + } + + ret_nonmax.push_back(i); + } + } + + template + int FAST9::cornerScore(const T* p, T c, int bstart) const { + int bmin = bstart; + int bmax = 255; + int b = (bmax + bmin)/2; + + /*Compute the score using binary search*/ + for(;;) + { + int cb = c + b; + int c_b= c - b; + + if( p[pixel[0]] > cb) { + if( p[pixel[1]] > cb) { + if( p[pixel[2]] > cb) { + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[7]] < c_b) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[14]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[6]] < c_b) { + if( p[pixel[15]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[13]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[13]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[5]] < c_b) { + if( p[pixel[14]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[12]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[14]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[6]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; 
+ } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[12]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[6]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[4]] < c_b) { + if( p[pixel[13]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[11]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[13]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) 
{ + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[11]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[3]] < c_b) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[10]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + 
if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[10]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[2]] < c_b) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[9]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[3]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[9]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[3]] < c_b) { + goto is_a_corner; + } else 
+ if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[1]] < c_b) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[2]] > cb) { + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[8]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[3]] < c_b) { + if( p[pixel[2]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( 
p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[2]] > cb) { + if( p[pixel[3]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[8]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[3]] < c_b) { + if( p[pixel[2]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[11]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[0]] < c_b) { + if( p[pixel[1]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[3]] > cb) { + if( p[pixel[2]] > cb) { + goto is_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[2]] < c_b) { + if( p[pixel[3]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + 
goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[1]] < c_b) { + if( p[pixel[2]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[3]] > cb) { + goto is_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[2]] < c_b) { + if( p[pixel[3]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + goto is_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[3]] < c_b) { + if( p[pixel[4]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + goto is_a_corner; + } else + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[13]] < c_b) { + if( p[pixel[11]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( 
p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[4]] < c_b) { + if( p[pixel[5]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[6]] > cb) { + goto is_a_corner; + } else + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[14]] < c_b) { + if( p[pixel[12]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[6]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[5]] < c_b) { + if( p[pixel[6]] > cb) { + if( p[pixel[15]] < c_b) { + if( p[pixel[13]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[6]] < c_b) { + if( p[pixel[7]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[13]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[6]] > cb) { + goto is_a_corner; + } else + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + goto is_a_corner; + } else + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + goto is_a_corner; + } else + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] < c_b) { + if( 
p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[9]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[3]] > cb) { + goto is_a_corner; + } else + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[8]] > cb) { + if( p[pixel[7]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[10]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[3]] > cb) { + if( p[pixel[2]] > cb) { + goto is_a_corner; + } else + if( p[pixel[11]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else 
+ goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[3]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[2]] < c_b) { + if( p[pixel[3]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[7]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[7]] > cb) { + if( p[pixel[8]] > cb) { + if( p[pixel[9]] > cb) { + if( p[pixel[6]] > cb) { + if( p[pixel[5]] > cb) { + if( p[pixel[4]] > cb) { + if( p[pixel[3]] > cb) { + if( p[pixel[2]] > cb) { + if( p[pixel[1]] > cb) { + goto is_a_corner; + } else + if( p[pixel[10]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto 
is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] > cb) { + if( p[pixel[11]] > cb) { + if( p[pixel[12]] > cb) { + if( p[pixel[13]] > cb) { + if( p[pixel[14]] > cb) { + if( p[pixel[15]] > cb) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else if( p[pixel[7]] < c_b) { + if( p[pixel[8]] < c_b) { + if( p[pixel[9]] < c_b) { + if( p[pixel[6]] < c_b) { + if( p[pixel[5]] < c_b) { + if( p[pixel[4]] < c_b) { + if( p[pixel[3]] < c_b) { + if( p[pixel[2]] < c_b) { + if( p[pixel[1]] < c_b) { + goto is_a_corner; + } else + if( p[pixel[10]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + if( p[pixel[10]] < c_b) { + if( p[pixel[11]] < c_b) { + if( p[pixel[12]] < c_b) { + if( p[pixel[13]] < c_b) { + if( p[pixel[14]] < c_b) { + if( p[pixel[15]] < c_b) { + goto is_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + } else + goto is_not_a_corner; + + // Bisection bookkeeping: a passing test raises bmin, a failing one lowers bmax, + // until the largest threshold that still yields a corner is isolated. + is_a_corner: + bmin=b; + goto end_if; + + is_not_a_corner: + bmax=b; + goto end_if; + + end_if: + + if(bmin == bmax - 1 || bmin == bmax) + return bmin; + b = (bmin + bmax) / 2; + } + } + + // Single-threshold FAST-9 test: returns 0 for a non-corner, otherwise an arc code whose low + // two nibbles give the first and last pixel of the contiguous arc, with bit 8 set for a bright + // arc (p > c+b) and clear for a dark one (p < c-b), matching the inline //pixel comments. + template <class T> + __always_inline int FAST9<T>::cornerTest(const T* p, T c, unsigned char b) const { + int cb = c + b; + int c_b = c - b; + + if(p[pixel[0]] > cb) { + if(p[pixel[1]] > cb) { + if(p[pixel[2]] > cb) { + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x108; //pixel 0 - 8 + } else + if(p[pixel[15]] > cb) { + return 0x1f7; //pixel 15 - 7 + } else + return 0; + } else if(p[pixel[7]] < c_b) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1e6; //pixel 14 - 6 + } else + return 0; + } else if(p[pixel[14]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { +
if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1e6; //pixel 14 - 6 + } else + return 0; + } else + return 0; + } else if(p[pixel[6]] < c_b) { + if(p[pixel[15]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x1d5; //pixel 13 - 5 + } else + return 0; + } else if(p[pixel[13]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1d5; //pixel 13 - 5 + } else + return 0; + } else + return 0; + } else if(p[pixel[13]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[5]] < c_b) { + if(p[pixel[14]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1c4; //pixel 12 - 4 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[12]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[14]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[6]] < c_b) { + return 0x05e; //pixel 5 - 14 + } else + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + 
if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1c4; //pixel 12 - 4 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[12]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[6]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[4]] < c_b) { + if(p[pixel[13]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1b3; //pixel 11 - 3 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[11]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[13]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + return 0x04d; //pixel 4 - 13 + } else + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + 
return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1b3; //pixel 11 - 3 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[11]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[3]] < c_b) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1a2; //pixel 10 - 2 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[10]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + 
return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x1a2; //pixel 10 - 2 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[10]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[2]] < c_b) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x191; //pixel 9 - 1 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[9]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[3]] < c_b) { + return 0x02a; //pixel 2 - 10 + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else 
+ if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x191; //pixel 9 - 1 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[9]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[3]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[1]] < c_b) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x180; //pixel 8 - 0 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x15d; //pixel 5 - 
13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[2]] > cb) { + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x12a; //pixel 2 - 10 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[8]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[3]] < c_b) { + if(p[pixel[2]] < c_b) { + return 0x019; //pixel 1 - 9 + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x180; //pixel 8 - 0 + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[2]] > cb) { + if(p[pixel[3]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[5]] > cb) { + 
if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + return 0x12a; //pixel 2 - 10 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[8]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[3]] < c_b) { + if(p[pixel[2]] < c_b) { + return 0x02a; //pixel 2 - 10 + } else + if(p[pixel[11]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[0]] < c_b) { + if(p[pixel[1]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[3]] > cb) { + if(p[pixel[2]] > cb) { + return 0x119; //pixel 1 - 9 + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x080; //pixel 8 - 0 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + 
return 0; + } else + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[2]] < c_b) { + if(p[pixel[3]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x02a; //pixel 2 - 10 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[1]] < c_b) { + if(p[pixel[2]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[3]] > cb) { + return 0x12a; //pixel 2 - 10 + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x091; //pixel 9 - 1 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[2]] < c_b) { + if(p[pixel[3]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[11]] > cb) { + 
if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0a2; //pixel 10 - 2 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[3]] < c_b) { + if(p[pixel[4]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + return 0x14d; //pixel 4 - 13 + } else + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[13]] < c_b) { + if(p[pixel[11]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0b3; //pixel 11 - 3 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + 
return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[4]] < c_b) { + if(p[pixel[5]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[6]] > cb) { + return 0x15e; //pixel 5 - 14 + } else + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[14]] < c_b) { + if(p[pixel[12]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0c4; //pixel 12 - 4 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[6]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[5]] < c_b) { + if(p[pixel[6]] > cb) { + if(p[pixel[15]] < c_b) { + if(p[pixel[13]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x0d5; //pixel 13 - 5 + } else + return 0; + } else + return 0; + } else + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[6]] < c_b) { + if(p[pixel[7]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] 
> cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0e6; //pixel 14 - 6 + } else + return 0; + } else + return 0; + } else if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x008; //pixel 0 - 8 + } else + if(p[pixel[15]] < c_b) { + return 0x0f7; //pixel 15 - 7 + } else + return 0; + } else + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0e6; //pixel 14 - 6 + } else + return 0; + } else + return 0; + } else + if(p[pixel[13]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0d5; //pixel 13 - 5 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[6]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0c4; //pixel 12 - 4 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0b3; //pixel 11 - 3 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + 
if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x0a2; //pixel 10 - 2 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[9]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[3]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x091; //pixel 9 - 1 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + 
} else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[8]] > cb) { + if(p[pixel[7]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[10]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[3]] > cb) { + if(p[pixel[2]] > cb) { + return 0x12a; //pixel 2 - 10 + } else + if(p[pixel[11]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x080; //pixel 8 - 0 + } else + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[3]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[2]] < c_b) { + if(p[pixel[3]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[7]] < c_b) { + return 0x02a; //pixel 2 - 10 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + 
} else + return 0; + } else + return 0; + } else + if(p[pixel[7]] > cb) { + if(p[pixel[8]] > cb) { + if(p[pixel[9]] > cb) { + if(p[pixel[6]] > cb) { + if(p[pixel[5]] > cb) { + if(p[pixel[4]] > cb) { + if(p[pixel[3]] > cb) { + if(p[pixel[2]] > cb) { + if(p[pixel[1]] > cb) { + return 0x119; //pixel 1 - 9 + } else + if(p[pixel[10]] > cb) { + return 0x12a; //pixel 2 - 10 + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + return 0x13b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + return 0x14c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + return 0x15d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + return 0x16e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] > cb) { + if(p[pixel[11]] > cb) { + if(p[pixel[12]] > cb) { + if(p[pixel[13]] > cb) { + if(p[pixel[14]] > cb) { + if(p[pixel[15]] > cb) { + return 0x17f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else if(p[pixel[7]] < c_b) { + if(p[pixel[8]] < c_b) { + if(p[pixel[9]] < c_b) { + if(p[pixel[6]] < c_b) { + if(p[pixel[5]] < c_b) { + if(p[pixel[4]] < c_b) { + if(p[pixel[3]] < c_b) { + if(p[pixel[2]] < c_b) { + if(p[pixel[1]] < c_b) { + return 0x019; //pixel 1 - 9 + } else + if(p[pixel[10]] < c_b) { + return 0x02a; //pixel 2 - 10 + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + return 0x03b; //pixel 3 - 11 + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + return 0x04c; //pixel 4 - 12 + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + return 0x05d; //pixel 5 - 13 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + return 0x06e; //pixel 6 - 14 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + if(p[pixel[10]] < c_b) { + if(p[pixel[11]] < c_b) { + if(p[pixel[12]] < c_b) { + if(p[pixel[13]] < c_b) { + if(p[pixel[14]] < c_b) { + if(p[pixel[15]] < c_b) { + return 0x07f; //pixel 7 - 15 + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + } else + return 0; + + return 1; + } +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/fast9.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/fast9.h new file mode 100644 index 00000000..16e44888 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/fast9.h @@ -0,0 +1,37 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef 
SPARSESTEREO_FAST9_H +#define SPARSESTEREO_FAST9_H + +namespace sparsestereo { + // Some functions of FAST9 used by other classes + template <typename T> + class FAST9 { + public: + // Pixel offsets used + int pixel[16]; + + FAST9(); + + // Sets the image row step width + void setStep(int step); + // Performs non-maximum suppression + template <typename score_t> + void nonMaxSuppression(const std::vector<cv::Point2i>& corners, const std::vector<score_t>& scores, std::vector<int>& ret_nonmax) const; + + // Autogenerated scoring method + int cornerScore(const T* p, T c, int bstart) const; + // Autogenerated corner test method + __always_inline int cornerTest(const T* p, T c, unsigned char b) const; + + private: + static const int pixelOffsetsX[16]; + static const int pixelOffsetsY[16]; + int step; + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/hammingdistance.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/hammingdistance.cpp new file mode 100644 index 00000000..3b7b2f34 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/hammingdistance.cpp @@ -0,0 +1,24 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include "hammingdistance.h" + +namespace sparsestereo { + bool HammingDistance::tableInitialized = false; + unsigned char HammingDistance::lookupTable[1U<<16] __attribute__ ((aligned (16))); + + HammingDistance::HammingDistance() { + if(!tableInitialized) + initTable(); + } + + void HammingDistance::initTable() { + // This is threadsafe because we always write the same values and + // set the initialization flag at the end + for(unsigned int i=0; i < sizeof(lookupTable)/sizeof(*lookupTable); i++) + lookupTable[i] = countNotPrecomputed(i); + tableInitialized = true; + } +} \ No newline at end of file diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/hammingdistance.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/hammingdistance.h new file mode 100644 index 00000000..15454872 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/hammingdistance.h @@ -0,0 +1,80 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_HAMMINGDISTANCE_H +#define SPARSESTEREO_HAMMINGDISTANCE_H + +namespace sparsestereo { + // Calculates the bitwise hamming distance between two integers + // or counts the number of set bits.
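+ // Usage sketch (illustrative only; 'left_census' and 'right_census' stand in for + // two 16-bit census descriptors computed elsewhere): + // HammingDistance hd; + // unsigned char cost = hd.calculate(left_census, right_census);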
+ // NOTE: Prefer popcnt instruction if available + class HammingDistance { + public: + HammingDistance(); + + // Calculates the hamming distance for 32-bit integers + unsigned char calculate(unsigned int x, unsigned int y) const { + return countBits(x^y); + } + + // Calculates the hamming distance for 16-bit integers + unsigned char calculate(unsigned short x, unsigned short y) const { + return countBits((unsigned short)(x^y)); + } + + // Determines the number of set bits for a 64-bit integer + unsigned char countBits(unsigned long long x) const { + // Count using precomputed table: + int buf = lookupTable[x & 0xFFFFU]; + x = x >> 16; + buf += lookupTable[x & 0xFFFFU]; + x = x >> 16; + return buf + lookupTable[x & 0xFFFFU] + lookupTable[x >> 16]; + } + + // Determines the number of set bits for a 32-bit integer + unsigned char countBits(unsigned int x) const { + // Count using precomputed table: + return lookupTable[x & 0xFFFFU] + lookupTable[x >> 16]; + } + + // Determines the number of set bits for a 16-bit integer + unsigned char countBits(unsigned short x) const { + return lookupTable[x]; + } + + // Returns a pointer to the static lookup table + const unsigned char* getLookupTable() const {return lookupTable;} + + private: + static bool tableInitialized; + static unsigned char lookupTable[1U<<16]; + + // Initializes the lookup table + void initTable(); + + // Methods for parallel bit counting algorithm + unsigned int getBit(int i) {return 0x1U << i;} + unsigned int getMask(int i) { + return (((unsigned int)(-1)) / (getBit(getBit(i)) + 1U)); + } + unsigned int countBlocks(int x, int level) { + return ((x) & getMask(level)) + (((x) >> (getBit(level))) & getMask(level)); + } + + // Performs a parallel bit counting without using the precomputed table + unsigned char countNotPrecomputed(unsigned int i) { + i = countBlocks(i, 0); + i = countBlocks(i, 1); + i = countBlocks(i, 2); + i = countBlocks(i, 3); + i = countBlocks(i, 4); + // i = countBlocks(i, 5); //For 64 Bits + return i; + } + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/imageconversion.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/imageconversion.cpp new file mode 100644 index 00000000..4386e138 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/imageconversion.cpp @@ -0,0 +1,95 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include "imageconversion.h" +#include "simd.h" + +namespace sparsestereo { + using namespace cv; + + double ImageConversion::get8UScalingFactor(int type) { + if(type == CV_16U) + return 1.0/256.0; + else if(type == CV_32F || type == CV_64F) + return 255.0; + else return 1.0; + } + + double ImageConversion::get16UScalingFactor(int type) { + if(type == CV_8U) + return 256.0; + else if(type == CV_32F || type == CV_64F) + return 65535.0; + else return 1.0; + } + + bool ImageConversion::convert(const Mat& input, Mat* output, int channels, int type, + double scalingFactor, int colorConv) { + if(input.channels() == channels && input.type() == type) { + // No conversion necessary + (*output) = input; + return false; + } + else if(input.type() != type) { + // Convert the data type + Mat buffer; + input.convertTo(buffer, type, scalingFactor); + if(input.channels() != channels) + // Then convert color + cvtColor(buffer, *output, colorConv); + else (*output) = buffer; + } else { + // Only convert color + cvtColor(input, *output, colorConv); + } + + return true; + } + + bool ImageConversion::convertTo8U(const Mat&
input, cv::Mat_<unsigned char>* output) { + return convert(input, output, 1, CV_8U, get8UScalingFactor(input.type()), CV_RGB2GRAY); + } + + bool ImageConversion::convertTo16U(const Mat& input, cv::Mat_<unsigned short>* output) { + return convert(input, output, 1, CV_16U, get16UScalingFactor(input.type()), CV_RGB2GRAY); + } + + bool ImageConversion::convertToColor(const Mat& input, cv::Mat_<cv::Vec3b>* output) { + return convert(input, output, 3, CV_8U, get8UScalingFactor(input.type()), CV_GRAY2RGB); + } + + void ImageConversion::unsignedToSigned(const Mat_<unsigned char>& src, Mat_<char>* dst) { + // Adds an offset of -128 to all values + if(src.cols % 16 == 0) { + // SSE Optimized + v16qi offset = SIMD::scalar16(-128); + + for(int y=0; y& src, Mat_* dst) { + // Adds an offset of 128 to all values + if(src.cols%16 == 0) { + // SSE Optimized + v16qi offset = SIMD::scalar16(128); + + for(int y=0; y + +namespace sparsestereo { + // Static methods for image conversion. If the return value is false + // then the output image points to the data of the input image. In this + // case, the input image should not be deleted. + class ImageConversion { + public: + // Converts an image to 8-bit grayscale + static bool convertTo8U(const cv::Mat& input, cv::Mat_<unsigned char>* output); + // Converts an image to 16-bit grayscale + static bool convertTo16U(const cv::Mat& input, cv::Mat_<unsigned short>* output); + // Converts an image to a color image + static bool convertToColor(const cv::Mat& input, cv::Mat_<cv::Vec3b>* output); + // Converts an unsigned char to a signed char image + static void unsignedToSigned(const cv::Mat_<unsigned char>& src,cv::Mat_<char>* dst); + // Converts a signed char to an unsigned char image + static void signedToUnsigned(const cv::Mat_<char>& src,cv::Mat_<unsigned char>* dst); + + private: + // Methods for calculating scaling factors + static double get8UScalingFactor(int type); + static double get16UScalingFactor(int type); + static bool convert(const cv::Mat& input, cv::Mat* output, int channels, int type, + double scalingFactor, int colorConv); + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/simd.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/simd.cpp new file mode 100644 index 00000000..0cb0f425 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/simd.cpp @@ -0,0 +1,29 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include "simd.h" + +#define S16(x) SIMD::scalar16NonLookup(x) + +namespace sparsestereo { + v16qi SIMD::v16Constants[256] __attribute__((aligned (16))) = { + S16(0), S16(1), S16(2), S16(3), S16(4), S16(5), S16(6), S16(7), S16(8), S16(9), S16(10), S16(11), S16(12), S16(13), S16(14), S16(15), + S16(16), S16(17), S16(18), S16(19), S16(20), S16(21), S16(22), S16(23), S16(24), S16(25), S16(26), S16(27), S16(28), S16(29), S16(30), S16(31), + S16(32), S16(33), S16(34), S16(35), S16(36), S16(37), S16(38), S16(39), S16(40), S16(41), S16(42), S16(43), S16(44), S16(45), S16(46), S16(47), + S16(48), S16(49), S16(50), S16(51), S16(52), S16(53), S16(54), S16(55), S16(56), S16(57), S16(58), S16(59), S16(60), S16(61), S16(62), S16(63), + S16(64), S16(65), S16(66), S16(67), S16(68), S16(69), S16(70), S16(71), S16(72), S16(73), S16(74), S16(75), S16(76), S16(77), S16(78), S16(79), + S16(80), S16(81), S16(82), S16(83), S16(84), S16(85), S16(86), S16(87), S16(88), S16(89), S16(90), S16(91), S16(92), S16(93), S16(94), S16(95), + S16(96), S16(97), S16(98), S16(99), S16(100), S16(101), S16(102), S16(103), S16(104), S16(105), S16(106), S16(107), S16(108), S16(109), S16(110), S16(111), + S16(112), S16(113), S16(114),
S16(115), S16(116), S16(117), S16(118), S16(119), S16(120), S16(121), S16(122), S16(123), S16(124), S16(125), S16(126), S16(127), + S16(128), S16(129), S16(130), S16(131), S16(132), S16(133), S16(134), S16(135), S16(136), S16(137), S16(138), S16(139), S16(140), S16(141), S16(142), S16(143), + S16(144), S16(145), S16(146), S16(147), S16(148), S16(149), S16(150), S16(151), S16(152), S16(153), S16(154), S16(155), S16(156), S16(157), S16(158), S16(159), + S16(160), S16(161), S16(162), S16(163), S16(164), S16(165), S16(166), S16(167), S16(168), S16(169), S16(170), S16(171), S16(172), S16(173), S16(174), S16(175), + S16(176), S16(177), S16(178), S16(179), S16(180), S16(181), S16(182), S16(183), S16(184), S16(185), S16(186), S16(187), S16(188), S16(189), S16(190), S16(191), + S16(192), S16(193), S16(194), S16(195), S16(196), S16(197), S16(198), S16(199), S16(200), S16(201), S16(202), S16(203), S16(204), S16(205), S16(206), S16(207), + S16(208), S16(209), S16(210), S16(211), S16(212), S16(213), S16(214), S16(215), S16(216), S16(217), S16(218), S16(219), S16(220), S16(221), S16(222), S16(223), + S16(224), S16(225), S16(226), S16(227), S16(228), S16(229), S16(230), S16(231), S16(232), S16(233), S16(234), S16(235), S16(236), S16(237), S16(238), S16(239), + S16(240), S16(241), S16(242), S16(243), S16(244), S16(245), S16(246), S16(247), S16(248), S16(249), S16(250), S16(251), S16(252), S16(253), S16(254), S16(255) + }; +} diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/simd.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/simd.h new file mode 100644 index 00000000..854f91e5 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/simd.h @@ -0,0 +1,150 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_SIMD_H +#define SPARSESTEREO_SIMD_H + +#include +#include +#include + +// SIMD Headers +#ifdef __SSE3__ +#include <pmmintrin.h> +#else +#error "SSE3 support not available" +#endif + +#ifdef __SSSE3__ +#include <tmmintrin.h> +#else +#warning "SSSE3 support not available" +#endif + +#ifdef __SSE4_1__ +#include <smmintrin.h> +#else +#warning "SSE4.1 support not available" +#endif + +#ifdef __SSE4_2__ +#include <nmmintrin.h> +#else +#warning "SSE4.2 support not available" +#endif + +/* This file contains declarations required for SIMD programming */ + +namespace sparsestereo { + // Integer types + typedef char v16qi __attribute__ ((vector_size (16), aligned (16))); + typedef short v8hi __attribute__ ((vector_size (16), aligned (16))); + typedef int v4si __attribute__ ((vector_size (16), aligned (16))); + typedef long long v2di __attribute__ ((vector_size (16), aligned (16))); + + // Floating point types + typedef float v4sf __attribute__ ((vector_size (16), aligned (16))); + typedef double v2sf __attribute__ ((vector_size (16), aligned (16))); + + class SIMD { + public: + // Methods for creating constants with just one scalar + static __always_inline const v16qi& scalar16(char c) { + /*const v16qi ret = {c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c}; + return ret;*/ + return v16Constants[(unsigned char)c]; + } + + static v16qi scalar16NonLookup(char c) { + const v16qi ret = {c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c}; + return ret; + } + + static __always_inline v8hi scalar8(short s) { + const v8hi ret = {s, s, s, s, s, s, s, s}; + return ret; + } + + static __always_inline v4si scalar4(int i) { + const v4si ret = {i, i, i, i}; + return ret; + } + + static __always_inline v2di scalar2(long long i) { + const v2di ret = {i, i}; + return ret; + } + + // Methods for
accessing vector elements + + static __always_inline char& element16(v16qi& vec, int index) { + union Accessor { + v16qi vector; + char elements[16]; + } __attribute__((may_alias)); + + return ((Accessor*)&vec)->elements[index]; + } + + static __always_inline short& element8(v8hi& vec, int index) { + union Accessor { + v8hi vector; + short elements[8]; + } __attribute__((may_alias)); + + return ((Accessor*)&vec)->elements[index]; + } + + static __always_inline int& element4(v4si& vec, int index) { + union Accessor { + v4si vector; + int elements[4]; + } __attribute__((may_alias)); + + return ((Accessor*)&vec)->elements[index]; + } + + static __always_inline long long& element2(v2di& vec, int index) { + union Accessor { + v2di vector; + long long elements[2]; + } __attribute__((may_alias)); + + return ((Accessor*)&vec)->elements[index]; + } + + // Allocates a memory aligned array (only use primitive types!) + template <typename T> + static __always_inline boost::shared_array<T> alignedNew(int size, int alignment = 16) { + + char* ptr = new char[sizeof(T) * size + alignment -1]; + T* alignedPtr = (T*)((size_t(ptr) + alignment-1) & -alignment); + + AlignedDeallocator<T> d = {ptr}; + return boost::shared_array<T>(alignedPtr, d); + } + + // Packed rotate left, 16-bit + static __always_inline v8hi prol16(const v8hi& value) { + v8hi lsb = __builtin_ia32_psrlwi128(value, 15); + return (value+value) | lsb; + } + + private: + // Required for deallocating aligned memory areas + template <typename T> + struct AlignedDeallocator { + char* ptr; + void operator() (T*) { + delete [] ptr; + } + }; + + // Lookup table for SSE 8-bit constants + static v16qi v16Constants[256] __attribute__((aligned (16))); + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsematch.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsematch.h new file mode 100644 index 00000000..c1a009ef --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsematch.h @@ -0,0 +1,54 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_SPARSEMATCH_H +#define SPARSESTEREO_SPARSEMATCH_H + +#include +#include "stereorectification.h" +#include "exception.h" + +namespace sparsestereo { + // Struct for a single sparse stereo match + struct SparseMatch { + SparseMatch() + : imgLeft(NULL), imgRight(NULL), rectLeft(), rectRight(), cost(0) {} + SparseMatch(const cv::KeyPoint* imgLeft, const cv::KeyPoint* imgRight, + const cv::Point2f& rectLeft, const cv::Point2f& rectRight, short cost = 0) + : imgLeft(imgLeft), imgRight(imgRight), rectLeft(rectLeft), rectRight(rectRight), cost(cost) {} + SparseMatch(const cv::KeyPoint* imgLeft, const cv::KeyPoint* imgRight, short cost = 0) + : imgLeft(imgLeft), imgRight(imgRight), rectLeft(imgLeft->pt), rectRight(imgRight->pt), cost(cost) {} + + const cv::KeyPoint* imgLeft; + const cv::KeyPoint* imgRight; + cv::Point2f rectLeft; + cv::Point2f rectRight; + short cost; + + float disparity() const { + return rectLeft.x - rectRight.x; + } + + // Converts a set of image coordinates and disparity values to 3d points + static void projectMatches(const std::vector<SparseMatch>& matches, std::vector<cv::Point3f>* points, + const StereoRectification* rect) { + if(rect != NULL) { + const CalibrationResult& calib = rect->getCalibrationResult(); + cv::Mat_<float> qFloat(calib.Q.rows, calib.Q.cols); + calib.Q.convertTo(qFloat, CV_32F); + + points->reserve(matches.size()); + for(unsigned int i=0; i<matches.size(); i++) { + float point[4] = {matches[i].rectLeft.x, matches[i].rectLeft.y, matches[i].disparity(), 1.0F}; + cv::Mat_<float> product = qFloat * cv::Mat_<float>(4, 1, point); + 
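// Dividing by the homogeneous coordinate product(3) yields metric 3D coordinates: + 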
points->push_back(cv::Point3f(product(0)/product(3), product(1)/product(3), product(2)/product(3))); + } + } + else throw Exception("Projection of 3d points is only possible for calibrated cameras."); + } + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparserectification.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparserectification.cpp new file mode 100644 index 00000000..c5373d13 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparserectification.cpp @@ -0,0 +1,111 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include "sparserectification.h" +#include "stereorectification.h" + +namespace sparsestereo { + using namespace std; + using namespace cv; + + SparseRectification::SparseRectification(bool subpixelFeatures, StereoRectification* rect) + : subpixelFeatures(subpixelFeatures), rect(rect) { + } + + void SparseRectification::rectify(const vector<cv::KeyPoint>& inLeft, const vector<cv::KeyPoint>& inRight, vector<RectifiedFeature>* outLeft, vector<RectifiedFeature>* outRight) { + + outLeft->resize(inLeft.size()); + outRight->resize(inRight.size()); + + //#pragma omp parallel sections default(shared) num_threads(2) + { + //#pragma omp section + { + if(rect != NULL) { + // Rectify left features + if(subpixelFeatures) + for(int i=0; i<(int)inLeft.size(); i++) { + (*outLeft)[i].imgPoint = &inLeft[i]; + (*outLeft)[i].rectPoint = rect->rectifyLeftPoint(inLeft[i].pt); + } + else for(int i=0; i<(int)inLeft.size(); i++) { + (*outLeft)[i].imgPoint = &inLeft[i]; + (*outLeft)[i].rectPoint = rect->rectifyLeftPoint(cv::Point2i(inLeft[i].pt.x, inLeft[i].pt.y)); + } + } else + // Copy unrectified + for(int i=0; i<(int)inLeft.size(); i++) { + (*outLeft)[i].imgPoint = &inLeft[i]; + (*outLeft)[i].rectPoint = inLeft[i].pt; + } + + // Sort features + sort(outLeft->begin(), outLeft->end(), featureSortComp); + } + + //#pragma omp section + { + if(rect != NULL) { + // Rectify right features + if(subpixelFeatures) + for(int i=0; i<(int)inRight.size(); i++) { + (*outRight)[i].imgPoint = &inRight[i]; + (*outRight)[i].rectPoint = rect->rectifyRightPoint(inRight[i].pt); + } + else for(int i=0; i<(int)inRight.size(); i++) { + (*outRight)[i].imgPoint = &inRight[i]; + (*outRight)[i].rectPoint = rect->rectifyRightPoint(cv::Point2i(inRight[i].pt.x, inRight[i].pt.y)); + } + } else + // Copy unrectified + for(int i=0; i<(int)inRight.size(); i++) { + (*outRight)[i].imgPoint = &inRight[i]; + (*outRight)[i].rectPoint = inRight[i].pt; + } + + // Sort features + sort(outRight->begin(), outRight->end(), featureSortComp); + } + } + } + + void SparseRectification::precomputeEpilinesStart(int imageWidth, int imageHeight, Mat_<short>* dst) { + Epiline::setMaxEpilineLength(imageWidth); + (*dst) = cv::Mat_<short>(imageHeight, imageWidth, (short)0); + + for(int y=0; ygetCalibrationResult().imageSize.width-1, rightRectX)); + + for(int i=0; i epsilon; i++) { + Epiline epiline = rect->getLeftEpiline(cv::Point2f(x1, leftImgY)); + if(!epiline.isValid()) + break; // Let's quit early if we can't find a valid epiline + float y1 = epiline.at(int(x1+0.5)); + + y1 = max(0.0F, min((float)rect->getCalibrationResult().imageSize.height-1, y1)); + + cv::Point2f guess = rect->rectifyLeftPoint(cv::Point2f(x1, y1)); + float d = guess.x - rightRectX; + + x1=max(0.0F, min((float)rect->getCalibrationResult().imageSize.width-1, x1-d)); + } + return x1; + } else { + // We don't have to guess, we know it!
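+ // (With rectification disabled, rectified and image coordinates coincide, so the right rectified x already is the left image x at infinite depth.)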
+ return rightRectX; + } + } +} diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparserectification.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparserectification.h new file mode 100644 index 00000000..8145b72d --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparserectification.h @@ -0,0 +1,49 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_SPARSERECTIFICATION_H +#define SPARSESTEREO_SPARSERECTIFICATION_H + +#include +#include + +namespace sparsestereo { + class StereoRectification; + + // Rectifies two sets of features from the left and right image + class SparseRectification { + public: + // Stores a pair of rectified and unrectified feature points + struct RectifiedFeature { + const cv::KeyPoint* imgPoint; + cv::Point2f rectPoint; + }; + + SparseRectification(bool subpixelFeatures, StereoRectification* rect); + + // Rectifies a set of sparse features + void rectify(const std::vector<cv::KeyPoint>& inLeft, const std::vector<cv::KeyPoint>& inRight, + std::vector<RectifiedFeature>* outLeft, std::vector<RectifiedFeature>* outRight); + + // Precomputes epilines start position + void precomputeEpilinesStart(int imageWidth, int imageHeight, cv::Mat_<short>* dst); + + private: + // Comparison operator used for sorting feature points + static bool featureSortComp(const RectifiedFeature& left, const RectifiedFeature& right) { + return left.rectPoint.y < right.rectPoint.y || (left.rectPoint.y == right.rectPoint.y && + left.rectPoint.x < right.rectPoint.x); + } + + bool subpixelFeatures; + StereoRectification* rect; + + // Estimates the distorted point in the left image, for which the disparity between rectified left and + // right image is 0. The given left point just serves for a first estimate. + inline float estimateDistortedInfiniteLeftX(int leftImgY, int rightRectX); + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsestereo-inl.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsestereo-inl.h new file mode 100644 index 00000000..e0fcee22 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsestereo-inl.h @@ -0,0 +1,225 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_SPARSESTEREO_INL_H +#define SPARSESTEREO_SPARSESTEREO_INL_H + +#include "sparsestereo.h" +#include +#include +#include +#include "exception.h" +#include "stereorectification.h" +#include "simd.h" + +namespace sparsestereo { + using namespace std; + + template <class CORRELATION, class COST_TYPE> + SparseStereo<CORRELATION, COST_TYPE>::SparseStereo(int maxDisparity, float yTolerance, float uniqueness, + StereoRectification* rect, bool subpixelFeatures, bool storeUnmatched, int leftRightStep) + : maxDisparity(maxDisparity), yTolerance(yTolerance), uniqueness(uniqueness), rect(rect), + storeUnmatched(storeUnmatched), leftRightStep(leftRightStep), precompEpilinesStart(0, 0), + sparseRect(subpixelFeatures, rect) { + } + + template <class CORRELATION, class COST_TYPE> + SparseStereo<CORRELATION, COST_TYPE>::~SparseStereo() { + } + + template <class CORRELATION, class COST_TYPE> + void SparseStereo<CORRELATION, COST_TYPE>::match(const cv::Mat& left, const cv::Mat& right, + const std::vector<cv::KeyPoint>& leftFeat, const std::vector<cv::KeyPoint>& rightFeat, + std::vector<SparseMatch>* matches) { + + if(left.size() != right.size() || (rect != NULL && left.size() != rect->getCalibrationResult().imageSize)) + throw Exception("Mismatching image sizes"); + if(leftFeat.size() == 0 || rightFeat.size() == 0) + return; // Can't match anything + + // For the first time, or when the image size changes, compute epiline lookup table + if(left.cols != precompEpilinesStart.cols || left.rows != 
precompEpilinesStart.rows) + sparseRect.precomputeEpilinesStart(left.cols, left.rows, &precompEpilinesStart); + + // Rectify feature points + sparseRect.rectify(leftFeat, rightFeat, &leftFeatures, &rightFeatures); + + // Features are now sorted from top left to bottom right + boost::shared_array<unsigned int> offsets = SIMD::alignedNew<unsigned int>(left.rows); + int maxRowLen __attribute__((unused)) = getRowOffsets(rightFeatures, offsets.get(), left.rows); + + minimumMatches.resize(leftFeatures.size()); + + // Perform matching + calcCosts(left, right, offsets.get()); + + // Perform left/right consistency check + denseConsistencyCheck(left, right); + + // Compose sparse disparity list + for(int i=0; i<(int)leftFeatures.size(); i++) { + const cv::KeyPoint* rightFeature = NULL; + cv::Point2f rightRect = leftFeatures[i].rectPoint + cv::Point2f(1, 0); //Disparity -1 + short cost = 0; + + if(minimumMatches[i].first != -1) { + rightFeature = rightFeatures[minimumMatches[i].first].imgPoint; + rightRect = rightFeatures[minimumMatches[i].first].rectPoint; + cost = minimumMatches[i].second; + } + + if(rightFeature != NULL || storeUnmatched) + matches->push_back(SparseMatch(leftFeatures[i].imgPoint, rightFeature, leftFeatures[i].rectPoint, rightRect, cost)); + } + } + + template <class CORRELATION, class COST_TYPE> + void SparseStereo<CORRELATION, COST_TYPE>::calcCosts(const cv::Mat& left, const cv::Mat& right, unsigned int* rowOffsets) { + + int lastRow = -1e9; //Invalid value + CORRELATION correlation; + correlation.setReferenceImage(left); + correlation.setComparisonImage(right); + + for(int l=0; l<(int)leftFeatures.size(); l++) { + // Find row start and end points + int ly = (int)(leftFeatures[l].rectPoint.y + 0.5); + int rightStart = rowOffsets[min(left.rows-1, max(0, int(ly - yTolerance)))]; + int rightEnd = rowOffsets[min(left.rows-1, max(0, int(ly + yTolerance) + 2))]; + + if(ly != lastRow) { + // Skip top and bottom + if(ly < correlation.getWindowSize()/2) { + continue; + } + else if(ly >= left.rows - correlation.getWindowSize()/2 -1) { + // We reached the bottom of the image. Let's invalidate the remaining features. + for(; l<(int)leftFeatures.size(); l++) + minimumMatches[l].first = -1; + break; + } + + // We have to initialize a new cost cube row + lastRow = ly; + } + + COST_TYPE minCost = numeric_limits<COST_TYPE>::max(); + int minRightFeature = -1; + + correlation.setReferencePoint(cv::Point2i(int(leftFeatures[l].imgPoint->pt.x + 0.5), + int(leftFeatures[l].imgPoint->pt.y + 0.5))); + + for(int r=rightStart; r= leftFeatures[l].rectPoint.x - maxDisparity && + fabs(leftFeatures[l].rectPoint.y - rightFeatures[r].rectPoint.y) <= yTolerance && + rightFeatures[r].rectPoint.x >= correlation.getWindowSize()/2 && + rightFeatures[r].rectPoint.x < left.cols - correlation.getWindowSize()/2) + { + // It is!
Let's compute a cost + COST_TYPE currentCost = correlation.match( + cv::Point2i(int(rightFeatures[r].imgPoint->pt.x + 0.5), int(rightFeatures[r].imgPoint->pt.y + 0.5))); + + if(currentCost < minCost) { // Only store smaller costs + minCost = currentCost; + minRightFeature = r; + } + } + } + + minimumMatches[l] = std::pair<int, COST_TYPE>(minRightFeature, minCost); + } + } + + template <class CORRELATION, class COST_TYPE> + int SparseStereo<CORRELATION, COST_TYPE>::getRowOffsets(const std::vector<SparseRectification::RectifiedFeature>& features, + unsigned int* offsets, int maxRows) { + int lastRow=-1, lastOffset = 0, longestRowLen = -1; + + for(unsigned int i=0; i= maxRows -1) + break; // The rectified point is outside the visible image + else if(y<0) + continue; + + if(y != lastRow) { + // Save offset + for(int j=lastRow+1; j<=y; j++) + offsets[j] = i; + + if((int)i - lastOffset > longestRowLen) + longestRowLen = i - lastOffset; + + lastRow = y; + lastOffset = i; + } + } + + // Save offset for the remaining items + for(int i=lastRow+1; i < maxRows; i++) + offsets[i] = features.size(); + if((int)features.size() - lastOffset > longestRowLen) + longestRowLen = features.size() - lastOffset; + + return longestRowLen; //Return the maximum row length + } + + template <class CORRELATION, class COST_TYPE> + void SparseStereo<CORRELATION, COST_TYPE>::denseConsistencyCheck(const cv::Mat& left, const cv::Mat& right) { + int lastRow = -1e9; //Invalid value + CORRELATION correlation; + correlation.setReferenceImage(right); + correlation.setComparisonImage(left); + + for(int l = 0; l < (int)leftFeatures.size(); l++) { + int ly = (int)(leftFeatures[l].rectPoint.y + 0.5); + if(ly != lastRow) { + // Skip top and bottom + if(ly < correlation.getWindowSize()/2) + continue; + else if(ly >= left.rows - correlation.getWindowSize()/2 -1) + break; + + lastRow = ly; + } + + // Get the minimum match and cost + int r = minimumMatches[l].first; + COST_TYPE minCost = minimumMatches[l].second; + + if(r == -1) + continue; + + // Get epiline start and end + float leftStartX = precompEpilinesStart(leftFeatures[l].imgPoint->pt.y, max(0, min( + precompEpilinesStart.cols, (int)(rightFeatures[r].rectPoint.x+0.5)))); + int startX = max(correlation.getWindowSize()/2, int(leftStartX /*+ 0.5*/)); + int endX = min(precompEpilinesStart.cols - correlation.getWindowSize()/2, + int(leftStartX + maxDisparity /*+ 0.5*/)); + Epiline epiline = rect!=NULL ?
rect->getLeftEpiline(leftFeatures[l].imgPoint->pt) : Epiline(leftFeatures[l].imgPoint->pt.y); + if(!epiline.isValid()) + continue; + + // Perform consistency check + correlation.setReferencePoint(cv::Point2i(int(rightFeatures[r].imgPoint->pt.x + 0.5), int(rightFeatures[r].imgPoint->pt.y + 0.5))); + for(int x = startX; x < endX; x+= leftRightStep) { + int y = (int)(epiline.at(x) + 0.5); + if(y < correlation.getWindowSize()/2 || y >= left.rows - correlation.getWindowSize()/2) + continue; + + COST_TYPE currentCost = correlation.match(cv::Point2i(x, y)); + + if(currentCost < minCost/uniqueness && fabs(x - int(leftFeatures[l].imgPoint->pt.x+0.5)) > leftRightStep) { + minimumMatches[l].first = -1; + break; + } + } + } + } +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsestereo.h b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsestereo.h new file mode 100644 index 00000000..d4d44568 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/sparsestereo.h @@ -0,0 +1,57 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_SPARSESTEREO_H +#define SPARSESTEREO_SPARSESTEREO_H + +#include +#include +#include +#include +#include +#include "sparsematch.h" +#include "sparserectification.h" + +namespace sparsestereo { + class StereoRectification; + + // Class bundling sparse stereo algorithms + template <class CORRELATION, class COST_TYPE> + class SparseStereo { + public: + SparseStereo(int maxDisparity, float yTolerance = 1, float uniqueness = 0.6, StereoRectification* rect = NULL, + bool subpixelFeatures = false, bool storeUnmatched = false, int leftRightStep = 1); + ~SparseStereo(); + + // Matches using a census window + void match(const cv::Mat& left, const cv::Mat& right, const std::vector<cv::KeyPoint>& leftFeat, + const std::vector<cv::KeyPoint>& rightFeat, std::vector<SparseMatch>* matches); + + private: + std::vector<SparseRectification::RectifiedFeature> leftFeatures, rightFeatures; + int maxDisparity; + float yTolerance; + float uniqueness; + StereoRectification* rect; + bool storeUnmatched; + int leftRightStep; + std::vector<std::pair<int, COST_TYPE> > minimumMatches; + cv::Mat_<short> precompEpilinesStart; // Precomputed epilines start positions + cv::Size frameSize; + SparseRectification sparseRect; + + // Gets the starting offsets of each row and returns the maximum row length + int getRowOffsets(const std::vector<SparseRectification::RectifiedFeature>& features, unsigned int* offsets, + int maxRows); + + // Calculates the matching costs using census windows + void calcCosts(const cv::Mat& left, const cv::Mat& right, unsigned int* rowOffsets); + + // Performs a left/right consistency check that is dense in the left image + void denseConsistencyCheck(const cv::Mat& left, const cv::Mat& right); + }; +} + +#endif diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/stereorectification.cpp b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/stereorectification.cpp new file mode 100644 index 00000000..1ccd67e7 --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/stereorectification.cpp @@ -0,0 +1,121 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#include "stereorectification.h" +#include +#include + +namespace sparsestereo { + using namespace std; + using namespace cv; + using namespace boost; + + scoped_array<float> Epiline::dummyLine; + int Epiline::dummyLineLength = -1; + + void Epiline::setMaxEpilineLength(int len) { + if(dummyLineLength != len) { + dummyLine.reset(new float[len]); + memset(dummyLine.get(), 0, sizeof(float)*len); + dummyLineLength = len; + } + } + + 
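// A minimal usage sketch (assumes a CalibrationResult 'calib' and raw grayscale + // images 'leftRaw'/'rightRaw' prepared elsewhere): + // StereoRectification rect(calib, StereoRectification::Linear); + // cv::Mat_<unsigned char> leftRect, rightRect; + // rect.rectifyLeftImage(leftRaw, &leftRect); + // rect.rectifyRightImage(rightRaw, &rightRect); + 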
StereoRectification::StereoRectification(const CalibrationResult& calibRes, Interpolation interpolation) + : interpolation(interpolation), calibRes(calibRes) + { + initImageRectMap(); + initPointRectMap(); + initEpilines(); + } + + void StereoRectification::initImageRectMap() { + // Create rectification maps for image rectification + initUndistortRectifyMap(calibRes.cameraMatrix[0], calibRes.distCoeffs[0], calibRes.R[0], + calibRes.P[0], calibRes.imageSize, CV_16SC2, imageRectMap[0][0], imageRectMap[0][1]); + initUndistortRectifyMap(calibRes.cameraMatrix[1], calibRes.distCoeffs[1], calibRes.R[1], + calibRes.P[1], calibRes.imageSize, CV_16SC2, imageRectMap[1][0], imageRectMap[1][1]); + } + + void StereoRectification::initPointRectMap() { + // Create rectification maps for integer point rectification + // Collect all image point coordinates + vector<Point2f> distortedPoints((calibRes.imageSize.width + 1) * (calibRes.imageSize.height + 1)); + for(int y=0; y<=calibRes.imageSize.height; y++) + for(int x=0; x<=calibRes.imageSize.width; x++) + distortedPoints[y * (calibRes.imageSize.width + 1) + x]/*, 0)*/ = Point2f(x, y); + for(int i=0; i<2; i++) { + // Perform rectification + vector<Point2f> undistortedPoints(distortedPoints.size()); + undistortPoints(distortedPoints, undistortedPoints, calibRes.cameraMatrix[i], calibRes.distCoeffs[i], + calibRes.R[i], calibRes.P[i]); + // Store results + pointRectMap[i] = Mat_<Point2f>(calibRes.imageSize.height + 1, calibRes.imageSize.width + 1, Point2f(-1, -1)); + for(int y=0; y<= calibRes.imageSize.height; y++) + for(int x=0; x<= calibRes.imageSize.width; x++) + pointRectMap[i](y,x) = undistortedPoints[y * (calibRes.imageSize.width + 1) + x];//, 0); + } + } + + void StereoRectification::initEpilines() { + for(int i=0; i<2; i++) { + epilines[i] = Mat_<float>(calibRes.imageSize, -1e6); + // First calculate a float undistortion map + Mat_<float> rectMap[2]; + initUndistortRectifyMap(calibRes.cameraMatrix[i], calibRes.distCoeffs[i], calibRes.R[i], + calibRes.P[i], calibRes.imageSize, CV_32F, rectMap[0], rectMap[1]); + + //Calculate epilines + for(int y=0; y < calibRes.imageSize.height; y++) { + for(int x=0; x=0 && imgX < calibRes.imageSize.width) { + double interpolated = rectMap[1](y, x) + dy / dx * (imgX - rectMap[0](y, x)); + if(interpolated >= 0 && interpolated < calibRes.imageSize.height) { + epilines[i](y, imgX) = interpolated; + } + } + } + } + + // Fill epilines index + epilineIndex[i] = Mat_<int>(calibRes.imageSize, -1); + for(int line = 0; line < calibRes.imageSize.height - 1; line++) + for(int x=0; x=0 && imgY < calibRes.imageSize.height) { + if(fabs(imgY - epilines[i](line, x)) < fabs(imgY - epilines[i](line+1, x))) + epilineIndex[i](imgY, x) = line; + else epilineIndex[i](imgY, x) = line + 1; + } + } + } + } + + Point2f StereoRectification::highPrecisionRectifyLeftPoint(Point2f inLeft) const { + Mat_<Point2f> inLeftPoint(1, 1, inLeft); + vector<Point2f> outLeftPoint; + + undistortPoints(inLeftPoint, outLeftPoint, calibRes.cameraMatrix[0], calibRes.distCoeffs[0], + calibRes.R[0], calibRes.P[0]); + + return outLeftPoint[0]; + } + + Point2f StereoRectification::highPrecisionRectifyRightPoint(Point2f inRight) const { + Mat_<Point2f> inRightPoint(1, 1, inRight); + vector<Point2f> outRightPoint; + + undistortPoints(inRightPoint, outRightPoint, calibRes.cameraMatrix[1], calibRes.distCoeffs[1], + calibRes.R[1], calibRes.P[1]); + + return outRightPoint[0]; + } +} diff --git a/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/stereorectification.h 
b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/stereorectification.h new file mode 100644 index 00000000..d9da153e --- /dev/null +++ b/perception/mil_vision/exFAST_SparseStereo/src/sparsestereo/stereorectification.h @@ -0,0 +1,163 @@ +/* + * Author: Konstantin Schauwecker + * Year: 2012 + */ + +#ifndef SPARSESTEREO_STEREORECTIFICATION_H +#define SPARSESTEREO_STEREORECTIFICATION_H + +#include +#include +#include +#include +#include +#include "calibrationresult.h" + +namespace sparsestereo { + // Stores one epiline and provides element access + class Epiline{ + public: + Epiline(): line(NULL), dy(0) {} + + // Stores one real epiline + Epiline(const cv::Mat_<float>& refLines, const cv::Mat_<int> refLineIndices, const cv::Point2f& pointOnLine) { + int index = refLineIndices(int(pointOnLine.y + 0.5), int(pointOnLine.x + 0.5)); + valid = (index != -1); + if(valid) { + assert(index != -1); + dy = pointOnLine.y - refLines(index, (int)round(pointOnLine.x)); + line = &refLines(index, 0); +#ifndef NDEBUG + width = refLines.cols; +#endif + } +#ifndef NDEBUG + else width = 0; +#endif + } + + // Creates a dummy epiline + Epiline(float y): line(dummyLine.get()), dy(y), valid(true) { +#ifndef NDEBUG + width=dummyLineLength; +#endif + } + + // Returns the y-coordinate for the epiline at position x + float at(int x) const { + assert(x >= 0 && x < width); + return line[x] + dy; + } + + // Returns true if this epiline is valid + bool isValid() const {return valid;} + + // Sets the maximum length of an epiline + static void setMaxEpilineLength(int len); + + private: + static boost::scoped_array<float> dummyLine; + static int dummyLineLength; + const float* line; + float dy; +#ifndef NDEBUG + int width; +#endif + bool valid; + }; + + // Class for rectifying input images + class StereoRectification { + public: + enum Interpolation { + Cubic = cv::INTER_CUBIC, + Linear = cv::INTER_LINEAR, + Nearest = cv::INTER_NEAREST + }; + + StereoRectification(const CalibrationResult& calibRes, Interpolation interpolation = Linear); + + // Rectifies a stereo pair + template <typename T> + void rectifyStereoPair(const std::pair<cv::Mat_<T>, cv::Mat_<T> >& input, std::pair<cv::Mat_<T>, cv::Mat_<T> >* output) const { + rectifyLeftImage(input.first, &(output->first)); + rectifyRightImage(input.second, &(output->second)); + } + + // Rectifies the left image + template <typename T> + void rectifyLeftImage(const cv::Mat_<T>& input, cv::Mat_<T>* output) const { + cv::remap(input, *output, imageRectMap[0][0], imageRectMap[0][1], interpolation); + } + + // Rectifies the right image + template <typename T> + void rectifyRightImage(const cv::Mat_<T>& input, cv::Mat_<T>* output) const { + cv::remap(input, *output, imageRectMap[1][0], imageRectMap[1][1], interpolation); + } + + // Rectifies a left integer point + cv::Point2f rectifyLeftPoint(cv::Point2i inLeft) const { + return pointRectMap[0](inLeft.y, inLeft.x); + } + + // Rectifies a right integer point + cv::Point2f rectifyRightPoint(cv::Point2i inRight) const { + return pointRectMap[1](inRight.y, inRight.x); + } + + // Rectifies a left float point + cv::Point2f rectifyLeftPoint(cv::Point2f inLeft) const { + return interpolatedLookup(pointRectMap[0], inLeft); + } + + // Rectifies a right float point + cv::Point2f rectifyRightPoint(cv::Point2f inRight) const { + return interpolatedLookup(pointRectMap[1], inRight); + } + + // Rectifies a left float point. This implementation is slow! + cv::Point2f highPrecisionRectifyLeftPoint(cv::Point2f inLeft) const; + + // Rectifies a right float point. This implementation is slow!
+ cv::Point2f highPrecisionRectifyRightPoint(cv::Point2f inRight) const; + + // Returns an epiline going through the given left point + Epiline getLeftEpiline(cv::Point2f p) const { + return Epiline(epilines[0], epilineIndex[0], p); + } + + // Returns an epiline going through the given right point + Epiline getRightEpiline(cv::Point2f p) const { + return Epiline(epilines[1], epilineIndex[1], p); + } + + // Returns the calibration results + const CalibrationResult& getCalibrationResult() const {return calibRes;} + + private: + cv::Mat imageRectMap[2][2]; + cv::Mat_<cv::Point2f> pointRectMap[2]; + cv::Mat_<float> epilines[2]; + cv::Mat_<int> epilineIndex[2]; + Interpolation interpolation; + CalibrationResult calibRes; + + void initImageRectMap(); + void initPointRectMap(); + void initEpilines(); + + // Performs a bilinear interpolated lookup + cv::Point2f interpolatedLookup(const cv::Mat_<cv::Point2f>& map, cv::Point2f pt) const { + float dx = pt.x - int(pt.x); + float dy = pt.y - int(pt.y); + + cv::Point2f top = (1.0 - dx) * map(int(pt.y), int(pt.x)) + dx * map(int(pt.y), int(pt.x + 1)); + cv::Point2f bottom = (1.0 - dx) * map(int(pt.y + 1), int(pt.x)) + dx * map(int(pt.y + 1), int(pt.x + 1)); + return (1.0 - dy) * top + dy * bottom; + } + }; +} + +#endif diff --git a/perception/mil_vision/include/camera_lidar_transformer.hpp b/perception/mil_vision/include/camera_lidar_transformer.hpp new file mode 100644 index 00000000..1273129d --- /dev/null +++ b/perception/mil_vision/include/camera_lidar_transformer.hpp @@ -0,0 +1,65 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DO_ROS_DEBUG + +#ifdef DO_ROS_DEBUG +#include "opencv2/opencv.hpp" +#include +#include +#include +#endif + +class CameraLidarTransformer +{ + private: + std::string camera_info_topic; + ros::NodeHandle nh; + ros::ServiceServer transformServiceServer; + tf2_ros::Buffer tfBuffer; + tf2_ros::TransformListener tfListener; + message_filters::Subscriber<sensor_msgs::PointCloud2> lidarSub; + message_filters::Cache<sensor_msgs::PointCloud2> lidarCache; + ros::Subscriber cameraInfoSub; + image_geometry::PinholeCameraModel cam_model; + bool camera_info_received; + sensor_msgs::CameraInfo camera_info; + bool inCameraFrame(cv::Point2d& p); + void cameraInfoCallback(const sensor_msgs::CameraInfo info); + void drawPoint(cv::Mat& mat, cv::Point2d& p, cv::Scalar color=cv::Scalar(0, 0, 255)); + bool transformServiceCallback(mil_msgs::CameraToLidarTransform::Request &req,mil_msgs::CameraToLidarTransform::Response &res); + static ros::Duration MAX_TIME_ERR; + #ifdef DO_ROS_DEBUG + ros::Publisher pubMarkers; + image_transport::ImageTransport image_transport; + image_transport::Publisher points_debug_publisher; + #endif + public: + CameraLidarTransformer(); +}; diff --git a/perception/mil_vision/include/mil_vision_lib/active_contours.hpp b/perception/mil_vision/include/mil_vision_lib/active_contours.hpp new file mode 100644 index 00000000..ed187f0e --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/active_contours.hpp @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace mil_vision +{ + +class SegmentDescription; +class ClosedCurve; + +namespace Perturbations +{ + static std::map, std::vector>> perturbation_cache; + void initPerturbationCache(); + std::vector>
getPerturbations(uint8_t entry, uint8_t exit); + uint8_t getIdxFromPoint(cv::Point2i hood_point); + cv::Point2i getPointFromIdx(uint8_t idx); + std::vector getPointList(std::vector& idx_list); + std::vector getHoodIdxs(uint8_t idx, bool include_border); + bool isNeighbor(uint8_t idx1, uint8_t idx2); + bool isNeighborPoint(const cv::Point2i &pt1, const cv::Point2i &pt2); + void growRoute(const std::vector& partial, const std::vector& occupied, + uint8_t entry, uint8_t exit); + std::vector perturb(const std::vector& src_curve, std::vector perturbation,int idx); +} // namespace Perturbations + +class ClosedCurve +{ + std::vector _curve_points; + +public: + struct Perturbation + { + size_t idx; + uint8_t entry; + uint8_t exit; + std::vector route; + }; + + ClosedCurve(std::vector points); + void applyPerturbation(const std::vector& perturbation, int idx); + ClosedCurve perturb(const std::vector& perturbation, int idx); + static bool validateCurve(std::vector& curve); + std::vector calcCosts(const cv::Mat& img, std::vector candidate_perturbs, + std::function cb); +}; + +class ActiveContour +{ + ClosedCurve _contour; + +public: + ActiveContour(); +}; + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/colorizer/camera_observer.hpp b/perception/mil_vision/include/mil_vision_lib/colorizer/camera_observer.hpp new file mode 100644 index 00000000..58eefcc3 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/colorizer/camera_observer.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include // Common includes are here +#include +#include +#include +#include + +namespace mil_vision{ + +class CameraObserver +{ + using CamStream = ROSCameraStream; + + ros::NodeHandle _nh; + + ROSCameraStream _cam_stream; + tf::TransformListener _tf_listener; + + std::string _err_msg{""}; + bool _ok{false}; + +public: + CameraObserver(ros::NodeHandle &nh, std::string &pcd_in_topic, std::string &cam_topic, size_t hist_size); + + std::shared_ptr getCameraModelPtr() const + { + return _cam_stream.getCameraModelPtr(); + } + + std::vector operator()(const PCD::ConstPtr &pcd) + { + return std::vector{}; + } + + bool ok() const + { + return _ok && ros::ok(); + } + + ColorObservation::VecImg get_color_observations(const PCD::ConstPtr &pcd); + +}; + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/colorizer/color_observation.hpp b/perception/mil_vision/include/mil_vision_lib/colorizer/color_observation.hpp new file mode 100644 index 00000000..fb7ec651 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/colorizer/color_observation.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include + + +namespace mil_vision{ + +struct alignas(16) ColorObservation +{ + using Vec = std::vector; + using VecImg = cv::Mat_; + // float tstamp; // seconds + float xyz[3]; + uint8_t bgr[3]; +}; + + +class UnoccludedPointsImg +{ +/* This class stores the indices of points on a point cloud that are not occluded + from a given camera view. It quantizes the projection of the point cloud points + into the image plane into pixel bins and selects the point closest to the center + of projection to be the only visible one for a given image pixel. 
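+ Points whose projections fall into a pixel bin already claimed by a closer point + are therefore treated as occluded from this camera view.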
+ */ + + ColorObservation::VecImg unoccluded_pt_idxs; + cv::Mat_<float> distance_image; + PCDPtr<> cloud_ptr; + +public: + UnoccludedPointsImg(); +}; + +struct PointColorStats +{ + float xyz[3]; + uint8_t bgr[3]; + float var[3]; + int n; +}; + +} //namespace mil_vision \ No newline at end of file diff --git a/perception/mil_vision/include/mil_vision_lib/colorizer/common.hpp b/perception/mil_vision/include/mil_vision_lib/colorizer/common.hpp new file mode 100644 index 00000000..2930fd3f --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/colorizer/common.hpp @@ -0,0 +1,30 @@ +#pragma once + +/* + This file includes all of the common dependencies for the files in the colorizer directory. + It also forward declares many of the most commonly used types and type aliases. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace mil_vision{ + +template<typename T = pcl::PointXYZ> using PCD = pcl::PointCloud<T>; +template<typename T = pcl::PointXYZ> using PCDPtr = std::shared_ptr<PCD<T>>; +template<typename T> using SPtrVector = std::vector<std::shared_ptr<T>>; +template<typename T> using UPtrVector = std::vector<std::unique_ptr<T>>; + +} // namespace mil_vision \ No newline at end of file diff --git a/perception/mil_vision/include/mil_vision_lib/colorizer/pcd_colorizer.hpp b/perception/mil_vision/include/mil_vision_lib/colorizer/pcd_colorizer.hpp new file mode 100644 index 00000000..03a81e67 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/colorizer/pcd_colorizer.hpp @@ -0,0 +1,72 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace mil_vision{ + +class PcdColorizer{ + /* + This class adds color information to XYZ-only point clouds if the + points in the cloud can be observed by a camera that takes color images + Note: needs accurate TF because it uses the ros tf package to transform + arbitrary point cloud frames into the frame of the color pinhole camera + Note: the rgb camera topic should be streaming rectified images + */ + + using CamStream = ROSCameraStream; + +public: + + PcdColorizer(ros::NodeHandle nh, std::string input_pcd_topic); + + bool ok() const + { + return _ok && ros::ok(); + } + +private: + + std::mutex change_input_mtx; // Assures ptr to input cloud won't be changed + // while work relying on it is being done + + // Flags + bool _work_to_do = false; // Unprocessed PCD + bool _active = false; // Activation status + bool _ok = false; // Error flag + std::string _err_msg; + + ros::NodeHandle _nh; + size_t _img_hist_size{10}; // All camera streams will keep this many frames + // in their buffers + SingleCloudProcessor _cloud_processor; + + // Subscribing and storing input + std::string _input_pcd_topic; + ros::Subscriber _cloud_sub; + PCD<>::ConstPtr _current_color_pcd; // Template default argument is in common.hpp + + // Storing result and publishing + std::string _output_pcd_topic; + ros::Publisher _cloud_pub; + PCD<>::ConstPtr _output_pcd; + + // SPtrVector _ros_cam_ptrs; + // SPtrVector _transformed_cloud_ptrs; + // SPtrVector _obs_vec_img_ptrs; + // PCD color_permanence_pcd; + + + void _cloud_cb(const PCD<>::ConstPtr &cloud_in); + // void _process_pcd(const PointCloud::ConstPtr &cloud_in); + // void _combine_pcd(); + // tf::TransformListener _tf_listener; + +}; // end class PcdColorizer + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/colorizer/single_cloud_processor.hpp b/perception/mil_vision/include/mil_vision_lib/colorizer/single_cloud_processor.hpp new file mode 100644 index
00000000..446b9b78 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/colorizer/single_cloud_processor.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace mil_vision { + +class SingleCloudProcessor +{ +public: + SingleCloudProcessor(ros::NodeHandle nh, std::string &in_pcd_topic, size_t hist_size); + void operator()(const PCD<>::ConstPtr &pcd); + bool ok() const { return _ok && ros::ok(); } + +private: + ros::NodeHandle _nh; + std::string in_pcd_topic; + size_t _hist_size; // image history buffer size + + // CameraObserver creates point color observations for a specific camera + UPtrVector<CameraObserver> _camera_observers; + + // Inter-thread communication + std::promise<void> _start_work_prom; + std::shared_future<void> _start_work_fut{_start_work_prom.get_future()}; + std::vector<std::promise<void>> _worker_done_proms; + std::vector<std::future<void>> _worker_done_futs; + + + int seq = 0; + + bool _ok{false}; + std::string _err_msg{""}; + +}; + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/cv_tools.hpp b/perception/mil_vision/include/mil_vision_lib/cv_tools.hpp new file mode 100644 index 00000000..bd608942 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/cv_tools.hpp @@ -0,0 +1,167 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include + +// #define SEGMENTATION_DEBUG + +namespace mil_vision { + +/// UTILS + +/// Templated pseudoinverse function implementation +template <typename _Matrix_Type_> +bool pseudoInverse(const _Matrix_Type_ &a, _Matrix_Type_ &result, + double epsilon = std::numeric_limits<double>::epsilon()) +{ + if (a.rows() < a.cols()) + return false; + + Eigen::JacobiSVD<_Matrix_Type_> svd = a.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV); + + typename _Matrix_Type_::Scalar tolerance = epsilon * std::max(a.cols(), a.rows()) * + svd.singularValues().array().abs().maxCoeff(); + + result = + svd.matrixV() * _Matrix_Type_(_Matrix_Type_((svd.singularValues().array().abs() > tolerance) + .select(svd.singularValues().array().inverse(), 0)).diagonal()) * svd.matrixU().adjoint(); + + return true; +} + +typedef std::vector<cv::Point> Contour; + +// Compute the centroid of an OpenCV contour (Not templated) +cv::Point contour_centroid(Contour &contour); + +// Used as a comparison function for std::sort(), sorting in order of decreasing +// areas +// returns true if the contourArea(c1) > contourArea(c2) +bool larger_contour(const Contour &c1, const Contour &c2); + +// Filter a histogram of type cv::MatND generated by cv::calcHist using a +// gaussian kernel +cv::MatND smooth_histogram(const cv::MatND &histogram, + size_t filter_kernel_size = 3, float sigma = 1.0); + +// Generate a one-dimensional gaussian kernel given a kernel size and its +// standard deviation +// (sigma) +std::vector<float> generate_gaussian_kernel_1D(size_t kernel_size = 3, + float sigma = 1.0); + +// Finds positive local maxima greater than (global maximum * thresh_multiplier) +std::vector<cv::Point> find_local_maxima(const cv::MatND &histogram, + float thresh_multiplier); + +// Finds negative local minima less than (global minimum * thresh_multiplier) +std::vector<cv::Point> find_local_minima(const cv::MatND &histogram, + float thresh_multiplier); + +// Selects the mode of a multi-modal distribution closest to a given target +// value +unsigned int select_hist_mode(std::vector<cv::Point> &histogram_modes, + unsigned int target); + +// Takes in a grayscale image and segments out a semi-homogeneous foreground +// object with pixel intensities close to 'target'. 
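+// The last three defaulted parameters are sigma, low_thresh_gain, and high_thresh_gain.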
Tuning of last three +// parameters may improve results but default values should work well in +// most cases. +void statistical_image_segmentation(const cv::Mat &src, cv::Mat &dest, + cv::Mat &debug_img, const int hist_size, + const float **ranges, const int target, + std::string image_name = "Unnamed Image", + bool ret_dbg_img = false, + const float sigma = 1.5, + const float low_thresh_gain = 0.5, + const float high_thresh_gain = 0.5); + +cv::Mat triangulate_Linear_LS(cv::Mat mat_P_l, cv::Mat mat_P_r, + cv::Mat undistorted_l, cv::Mat undistorted_r); + +Eigen::Vector3d kanatani_triangulation(const cv::Point2d &pt1, + const cv::Point2d &pt2, + const Eigen::Matrix3d &essential, + const Eigen::Matrix3d &R); + +Eigen::Vector3d lindstrom_triangulation(const cv::Point2d &pt1, + const cv::Point2d &pt2, + const Eigen::Matrix3d &essential, + const Eigen::Matrix3d &R); + +struct ImageWithCameraInfo { + /** + Packages corresponding sensor_msgs::ImageConstPtr and + sensor_msgs::CameraInfoConstPtr + info_msg + into one object. Containers of these objects can be sorted by their + image_time attribute + */ +public: + ImageWithCameraInfo() {} + ImageWithCameraInfo(sensor_msgs::ImageConstPtr _image_msg_ptr, + sensor_msgs::CameraInfoConstPtr _info_msg_ptr); + sensor_msgs::ImageConstPtr image_msg_ptr; + sensor_msgs::CameraInfoConstPtr info_msg_ptr; + ros::Time image_time; + bool operator<(const ImageWithCameraInfo &right) const { + return this->image_time < right.image_time; + } +}; + +class FrameHistory { + /** + Object that subscribes itself to an image topic and stores up to a + user defined + number of ImageWithCameraInfo objects. The frame history can then be + retrieved + in whole or just a portion. + */ +public: + FrameHistory(std::string img_topic, unsigned int hist_size); + ~FrameHistory(); + void image_callback(const sensor_msgs::ImageConstPtr &image_msg, + const sensor_msgs::CameraInfoConstPtr &info_msg); + std::vector<ImageWithCameraInfo> + get_frame_history(unsigned int frames_requested); + int frames_available(); + + const std::string topic_name; + const size_t history_size; + +private: + ros::NodeHandle nh; + image_transport::CameraSubscriber _image_sub; + image_transport::ImageTransport _image_transport; + std::vector<ImageWithCameraInfo> _frame_history_ring_buffer; + size_t frame_count; +}; + +/// Param Helpers + +struct Range { + cv::Scalar lower; + cv::Scalar upper; +}; + +void range_from_param(std::string &param_root, Range &range); + +void inParamRange(cv::Mat &src, Range &range, cv::Mat &dest); + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame.hpp b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame.hpp new file mode 100644 index 00000000..c3cf6c5a --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame.hpp @@ -0,0 +1,202 @@ +#pragma once + +// #include +#include +#include +#include +#include + +#include +#include +#include + + +namespace mil_vision +{ + +enum class PixelType +{ +/* + Enumeration for dealing with different image pixel types + The underlying integers for these enums are compatible with OpenCV's + "CV_<bit depth><type>C<channels>" + macros. The benefit to having these is that we do not have to have OpenCV as a dependency. + Theoretically, the CameraFrame Objects need not use a cv::Mat and this class' functionality + would not be affected. The underscore in front of these enums is to solve naming conflicts + with commonly used OpenCV and Eigen macros.
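+ For example, _8UC3 has the value 16, which equals OpenCV's CV_8UC3 + (i.e. CV_MAKETYPE(CV_8U, 3)), so the values can be cast directly when OpenCV is present.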
diff --git a/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame.hpp b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame.hpp new file mode 100644 index 00000000..c3cf6c5a --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame.hpp @@ -0,0 +1,202 @@ +#pragma once + +// #include +#include +#include +#include +#include + +#include +#include +#include + + +namespace mil_vision +{ + +enum class PixelType +{ +/* + Enumeration for dealing with different image pixel types + The underlying integers for these enums are compatible with OpenCV's + CV_<bit-depth>{U|S|F}C<channels> macros. The benefit to having these is that we do not have to have OpenCV as a dependency. + Theoretically, the CameraFrame objects need not use a cv::Mat and this class' functionality + would not be affected. The underscore in front of these enums is to solve naming conflicts + with common OpenCV and Eigen macros. +*/ + _8UC1=0, _8SC1, _16UC1, _16SC1, _32SC1, _32FC1, _64FC1, + _8UC2=8, _8SC2, _16UC2, _16SC2, _32SC2, _32FC2, _64FC2, + _8UC3=16, _8SC3, _16UC3, _16SC3, _32SC3, _32FC3, _64FC3, + _8UC4=24, _8SC4, _16UC4, _16SC4, _32SC4, _32FC4, _64FC4, _UNKNOWN = -1 +}; + +template <typename cam_model_ptr_t = std::shared_ptr<image_geometry::PinholeCameraModel>, + typename img_scalar_t = uint8_t, + typename time_t_ = ros::Time, + typename float_t = float> +class CameraFrame +{ +/* + This class is used to represent a single frame from a camera. It contains information about + the time the image was taken, the id and camera parameters of the camera that took it, and the + image itself. This is a templated class whose second template parameter is the type of the + pixel elements (commonly uint8_t for grayscale images and cv::Vec3b for RGB images) +*/ + +public: + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Constructors and Destructors /////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + CameraFrame() // Default Constructor + { + } + + CameraFrame(const CameraFrame &other) // Copy Constructor + { + this->_seq = other._seq; + this->_stamp = other._stamp; + this->_image = other._image.clone(); // Object will have unique copy of image data + this->_cam_model_ptr = other._cam_model_ptr; + } + + // From ROS img msg + CameraFrame(const sensor_msgs::ImageConstPtr &image_msg_ptr, cam_model_ptr_t &cam_model_ptr, + bool is_rectified = false, float_t store_at_scale = 1.0); + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Public Methods ///////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + cam_model_ptr_t getCameraModelPtr() const + { + return _cam_model_ptr; + } + + unsigned int seq() const + { + return _seq; + } + + time_t_ stamp() const + { + return _stamp; + } + + const cv::Mat_<img_scalar_t>& image() const + { + return _image; + } + + bool rectified() const + { + return _rectified; + } + + float_t getImageScale() const + { + return _img_scale; + } + + void copyImgTo(cv::Mat &dest) const + { + dest = _image.clone(); + } + + bool isCameraGeometryKnown() const + { + return _cam_model_ptr != nullptr; + } + + +private: + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Private Members //////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + // Frames from the same source will have sequentially increasing seq numbers + unsigned int _seq = 0; + + // Time that this image was taken + time_t_ _stamp; + + // Stores the image data (not a cv::Mat header pointing to shared image data) + cv::Mat_<img_scalar_t> _image; + + // Points to a camera model object (shared ownership) that stores information about the intrinsic + // and extrinsic geometry of the camera used to take this image + cam_model_ptr_t _cam_model_ptr = nullptr; + + // Identifies if this image has already been rectified with the distortion parameters in the + // associated camera model object + bool _rectified = false; + + // Scale of the image compared to that which would be generated by projecting using the camera + // geometry expressed in the associated camera model object + float_t _img_scale = 1.0f; + + // Stores the pixel data type +
mil_vision::PixelType TYPE = mil_vision::PixelType::_UNKNOWN; + + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Private Methods //////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + // void project3DPointToImagePlane(Eigen::Matrix cam_frame_pt); +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// +////// Templated function implementations ///////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Constructor from ROS image message and ROS pinhole camera model +template <typename cam_model_ptr_t, typename img_scalar_t, typename time_t_, typename float_t> +CameraFrame<cam_model_ptr_t, img_scalar_t, time_t_, float_t>::CameraFrame( + const sensor_msgs::ImageConstPtr &image_msg_ptr, cam_model_ptr_t &cam_model_ptr, + bool is_rectified, float_t store_at_scale) +try +{ + // ROS image message decoding + cv_bridge::CvImageConstPtr _ros_img_bridge; + std::string encoding = image_msg_ptr->encoding; + _ros_img_bridge = cv_bridge::toCvShare(image_msg_ptr, encoding); + _ros_img_bridge->image.copyTo(_image); + + // Resize image as requested + if(store_at_scale != 1.0) + { + cv::resize(_image, _image, cv::Size(0, 0), store_at_scale, store_at_scale); + this->_img_scale = store_at_scale; + } + + // Store ptr to cam model object + this->_cam_model_ptr = cam_model_ptr; + + this->_rectified = is_rectified; + + // Get header information + _seq = image_msg_ptr->header.seq; + _stamp = image_msg_ptr->header.stamp; +} +catch(cv_bridge::Exception &e) +{ + ROS_WARN("Error converting sensor_msgs::ImageConstPtr to cv::Mat."); +} +catch( cv::Exception &e ) +{ + std::string err_msg = "Error copying cv::Mat created from ROS image message to the cv::Mat stored in the Camera Frame Object: " + std::string(e.what()); + ROS_WARN("%s", err_msg.c_str()); + std::cout << "exception caught: " << err_msg << std::endl; +} +catch(const std::exception &e) +{ + ROS_WARN("%s", e.what()); +} + +} // namespace mil_vision
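+// Example (illustrative sketch): wrapping an incoming ROS image with the
+// default template arguments. "msg" and "model" are assumed to come from an
+// image callback and a configured image_geometry::PinholeCameraModel.
+//
+//   std::shared_ptr<image_geometry::PinholeCameraModel> model = getModel();  // hypothetical helper
+//   sensor_msgs::ImageConstPtr msg = getLatestImage();                       // hypothetical helper
+//   CameraFrame<> frame{msg, model, false, 0.5};  // store at half resolution
+//   ROS_INFO("frame %u stored at scale %.2f", frame.seq(), double(frame.getImageScale()));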
 diff --git a/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame_sequence.hpp b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame_sequence.hpp new file mode 100644 index 00000000..3369f122 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_frame_sequence.hpp @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace mil_vision +{ + +template <typename cam_model_ptr_t = std::shared_ptr<image_geometry::PinholeCameraModel>, + typename img_scalar_t = uint8_t, + typename time_t_ = ros::Time, + typename float_t = float> +class CameraFrameSequence +{ +/* + This is an abstract class interface for classes that represent sequences of images. It provides + basic functionality to access individual frames along with camera geometry information. It is + intended to make it more convenient to create visual algorithms that take into account temporal + information. +*/ + +public: + + // Type aliases + using CamFrame = CameraFrame<cam_model_ptr_t, img_scalar_t, time_t_, float_t>; + using CamFramePtr = std::shared_ptr<CamFrame>; + using CamFrameConstPtr = std::shared_ptr<const CamFrame>; + using CamFrameSequence = CameraFrameSequence<cam_model_ptr_t, img_scalar_t, time_t_, float_t>; + using CircularBuffer = boost::circular_buffer<CamFramePtr>; + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Constructors and Destructors /////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + CameraFrameSequence() = default; + + CameraFrameSequence(const CameraFrameSequence&) = delete; // Forbid copy construction + + CameraFrameSequence(CameraFrameSequence&&) = delete; // Forbid move construction + + virtual ~CameraFrameSequence() = default; + + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Public Methods ///////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + virtual size_t size() const = 0; + + virtual cam_model_ptr_t getCameraModelPtr() const = 0; // returns nullptr if geometry is unknown + + // If geometry is constant then calls to rows() and cols() will be valid + virtual bool isGeometryConst() const = 0; + + virtual int rows() const = 0; + + virtual int cols() const = 0; + + // Accessors + + // Returns a copy of the CameraFrame taken closest to the given time + virtual CamFrameConstPtr getFrameFromTime(time_t_) = 0; + + // Returns reference to the nth frame from the most recent. For example frame_sequence[0] is + // the most recent frame, frame_sequence[-1] is the oldest frame, and frame_sequence[-2] is + // the second oldest frame + virtual CamFrameConstPtr operator[](int) = 0;
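+  // Example (illustrative sketch) of the indexing contract for concrete
+  // sequences such as ROSCameraStream:
+  //
+  //   CamFrameConstPtr newest = seq[0];    // most recent frame
+  //   CamFrameConstPtr oldest = seq[-1];   // oldest buffered frame
+  //   CamFrameConstPtr near = seq.getFrameFromTime(some_stamp);  // closest in time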
+ +}; + +} // namespace mil_vision \ No newline at end of file diff --git a/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_model.hpp b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_model.hpp new file mode 100644 index 00000000..3e6c7ee2 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/image_acquisition/camera_model.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include +#include + +namespace mil_vision{ + +template <typename T = float> +class CameraModel{ +public: + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Constructors and Destructors /////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + CameraModel() + { + } + + ~CameraModel() + { + } + +private: + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Private Methods //////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + Eigen::Matrix<T, 3, 4> get_projection_matrix(); + + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Private Members //////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + int ROWS = 0; + int COLS = 0; + + // Distortion model (only the plumb bob model is currently supported) + T D[5] = {0, 0, 0, 0, 0}; + + // Camera Geometry + Eigen::Matrix<T, 3, 3> K; // Camera intrinsics + Eigen::Matrix<T, 3, 1> C; // Center of projection in world frame + Eigen::Matrix<T, 3, 3> R; // Orientation of camera frame relative to world frame + + +}; + + +template <typename T> +Eigen::Matrix<T, 3, 4> CameraModel<T>::get_projection_matrix() +{ + Eigen::Matrix<T, 3, 4> aug; + aug.template block<3, 3>(0, 0) = Eigen::Matrix<T, 3, 3>::Identity(); + aug.col(3) = -C; + return K * R * aug; +}
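+// For reference (standard finite projective camera, cf. Hartley & Zisserman):
+// a world point X in homogeneous coordinates projects to x = K R [I | -C] X,
+// i.e. x = K R (X - C), where K is the 3x3 intrinsic matrix, R rotates world
+// coordinates into the camera frame, and C is the camera centre in world
+// coordinates.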
+ +} // namespace mil_vision \ No newline at end of file diff --git a/perception/mil_vision/include/mil_vision_lib/image_acquisition/ros_camera_stream.hpp b/perception/mil_vision/include/mil_vision_lib/image_acquisition/ros_camera_stream.hpp new file mode 100644 index 00000000..bfe1a120 --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/image_acquisition/ros_camera_stream.hpp @@ -0,0 +1,340 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +// DBG +#include + + +namespace mil_vision +{ + +template <typename img_scalar_t = uint8_t, typename float_t = float> +class ROSCameraStream : public CameraFrameSequence<std::shared_ptr<image_geometry::PinholeCameraModel>, img_scalar_t, ros::Time, float_t> +{ + using cam_model_ptr_t = std::shared_ptr<image_geometry::PinholeCameraModel>; + using time_t_ = ros::Time; + +public: + + // Type aliases + using CamFrame = CameraFrame<cam_model_ptr_t, img_scalar_t, time_t_, float_t>; + using CamFramePtr = std::shared_ptr<CamFrame>; + using CamFrameConstPtr = std::shared_ptr<const CamFrame>; + using CamFrameSequence = CameraFrameSequence<cam_model_ptr_t, img_scalar_t, time_t_, float_t>; + using CircularBuffer = boost::circular_buffer<CamFramePtr>; + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Constructors and Destructors /////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + // Default Constructor + ROSCameraStream(ros::NodeHandle nh, size_t buffer_size) + : _frame_ptr_circular_buffer(buffer_size), _nh(nh), _it(_nh) {} + + ~ROSCameraStream(); + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Public Methods ///////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + // Returns true if the camera stream object has been successfully initialized + bool init(std::string &camera_topic); + + // Returns false if an internal error has occurred or ROS is in the process of shutting down the + // enclosing node + bool ok() {return _ok && ros::ok();} + + typename CircularBuffer::const_iterator begin() const + { + return _frame_ptr_circular_buffer.begin(); + } + + typename CircularBuffer::const_iterator end() const + { + return _frame_ptr_circular_buffer.end(); + } + + size_t size() const + { + return _frame_ptr_circular_buffer.size(); + } + + cam_model_ptr_t getCameraModelPtr() const + { + return _cam_model_ptr; // returns nullptr if geometry is unknown + } + + // If geometry is constant then calls to rows() and cols() will be valid + bool isGeometryConst() const + { + return true; + } + + int rows() const + { + return ROWS; + } + + int cols() const + { + return COLS; + } + + CamFrameConstPtr getFrameFromTime(ros::Time desired_time); + + CamFrameConstPtr operator[](int i); + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Public Members ///////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + +private: + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Private Methods //////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + void _addFrame(CamFramePtr &new_frame_ptr); + + void _newFrameCb(const sensor_msgs::ImageConstPtr &image_msg_ptr); + + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Private Members //////////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////////////////////////////// + + // Container for all included CameraFrame objects + CircularBuffer _frame_ptr_circular_buffer; + + // Pointer to a shared camera model object for all frames in the sequence. If null, it indicates + // that the Frames have different CameraInfo objects which should be examined individually + cam_model_ptr_t _cam_model_ptr = nullptr; + + // Time bounds of buffer + time_t_ _start_time {}; + time_t_ _end_time {}; + + // Shared CameraFrame properties if camera geometry is constant + int COLS = -1; + int ROWS = -1; + mil_vision::PixelType TYPE = mil_vision::PixelType::_UNKNOWN; + + // Mutex for multithreaded access to camera frame data + std::mutex _mtx; + + // ROS node handle + ros::NodeHandle _nh; + + // ROS image transport + image_transport::ImageTransport _it; + + // Custom ROS Callback Queue + ros::CallbackQueue _cb_queue; + + // Flexible topic subscription with potential for filtering and chaining + message_filters::Subscriber<sensor_msgs::Image> img_sub; + + // ROS spinner to handle callbacks in a background thread + ros::AsyncSpinner async_spinner {1, &_cb_queue}; + + // The ros topic we will subscribe to + std::string _img_topic = "uninitialized"; + + // Status Flag + bool _ok = false; + + // Store error messages here + std::string _err_msg{""}; +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// +////// Templated function implementations ///////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Initializer for when there is an image msg being published w/ a corresponding camera info msg +template <typename img_scalar_t, typename float_t> +bool ROSCameraStream<img_scalar_t, float_t>::init(std::string &camera_topic) +{ + using mil_tools::operator "" _s; // convert raw string literal to std::string + _img_topic = camera_topic; + bool success = false; + image_transport::CameraSubscriber cam_sub; + + // subscribes to image msg and camera info and initializes CameraModel Object then disconnects subscriber + auto init_lambda = + [&](const sensor_msgs::ImageConstPtr &image_msg_ptr, const sensor_msgs::CameraInfoConstPtr &info_msg_ptr) mutable + { + std::string init_msg {"ROSCameraStream: Initializing with "}; + init_msg += _img_topic; + // ROS_INFO_NAMED("ROSCameraStream", init_msg.c_str()); + // Set ptr to camera model object + std::shared_ptr<image_geometry::PinholeCameraModel> camera_model_ptr{new image_geometry::PinholeCameraModel()}; + camera_model_ptr->fromCameraInfo(info_msg_ptr); + this->_cam_model_ptr = camera_model_ptr; + + // Set metadata attributes + this->ROWS = image_msg_ptr->height; + this->COLS = image_msg_ptr->width; + success = true; + cam_sub.shutdown(); + return; + }; + + // Subscribe to both camera topic and camera info topic + cam_sub = _it.subscribeCamera(_img_topic, 100, init_lambda); + + // The amount of time that we will try to wait for a cb from cam_sub + ros::WallDuration timeout{5, 0}; + + // Loop to check if we get callbacks from cam_sub +
auto sub_start = ros::WallTime::now(); + ros::WallRate rate{10}; // 10 Hz so we don't spam the CPU + while(ros::WallTime::now() - sub_start < timeout) + { + ros::spinOnce(); + if(success) + { + // Subscribe to ROS image topic + img_sub.subscribe(_nh, _img_topic, 100, ros::TransportHints(), &_cb_queue); + + // Register callback to process frames published on camera img topic + auto img_cb = [this](const sensor_msgs::ImageConstPtr &image_msg_ptr){_newFrameCb(image_msg_ptr);}; + img_sub.registerCallback(img_cb); + + // Start listening for image messages in a background thread + async_spinner.start(); + + _ok = true; + return _ok; // Ideal exit point for init + } + rate.sleep(); + } + + _err_msg = "ROSCameraStream: Timed out trying to initialize with camera topic "_s + _img_topic + ". Run 'rostopic echo "_s + _img_topic + "' to make sure it is being published to."; + ROS_WARN_NAMED("ROSCameraStream", "%s", _err_msg.c_str()); + return _ok; +} +// TODO: handle construction from img msg when there is no matching camera info msg + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template <typename img_scalar_t, typename float_t> +typename ROSCameraStream<img_scalar_t, float_t>::CamFrameConstPtr +ROSCameraStream<img_scalar_t, float_t>::getFrameFromTime(ros::Time desired_time) +{ + using mil_tools::operator "" _s; + + // Check bounds on time + if(desired_time < this->_start_time || desired_time > this->_end_time) + { + ROS_WARN_STREAM_THROTTLE_NAMED(1, "ROSCameraStream", + "ROSCameraStream: The camera frame you requested is outside the time range of the buffer."); + return nullptr; + } + + CamFrameConstPtr closest_in_time = this->operator[](0); + double min_abs_nsec_time_diff = fabs((this->operator[](0)->stamp() - desired_time).toNSec()); + double abs_nsec_time_diff = -1; + for(CamFrameConstPtr frame_ptr : this->_frame_ptr_circular_buffer) + { + abs_nsec_time_diff = fabs((frame_ptr->stamp() - desired_time).toNSec()); + if(abs_nsec_time_diff < min_abs_nsec_time_diff) + { + closest_in_time = frame_ptr; + min_abs_nsec_time_diff = abs_nsec_time_diff; + } + } + return closest_in_time; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////////
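+// Example (illustrative sketch): buffering the most recent 30 frames from a
+// camera topic. The topic name is hypothetical.
+//
+//   ros::NodeHandle nh;
+//   ROSCameraStream<cv::Vec3b> stream{nh, 30};  // color frames
+//   std::string topic{"/camera/image_raw"};
+//   if (stream.init(topic))
+//   {
+//     auto latest = stream[0];   // most recent frame
+//     auto oldest = stream[-1];  // oldest buffered frame
+//   }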
+template <typename img_scalar_t, typename float_t> +typename ROSCameraStream<img_scalar_t, float_t>::CamFrameConstPtr +ROSCameraStream<img_scalar_t, float_t>::operator[](int i) +{ + using mil_tools::operator "" _s; + + // Prevent adding new frames while frames are being accessed + std::lock_guard<std::mutex> lock(_mtx); + + // shared_ptr to a dynamically allocated reference frame object + CamFrameConstPtr shared_ptr_to_const_frame; + + try + { + if(i >= 0) // regular access for non-negative indices + { + shared_ptr_to_const_frame = this->_frame_ptr_circular_buffer[i]; + } + else{ // reverse access for negative indices (ex. [-1] refers to the last element) + size_t past_last_idx = this->_frame_ptr_circular_buffer.end() - this->_frame_ptr_circular_buffer.begin(); // DBG + shared_ptr_to_const_frame = this->_frame_ptr_circular_buffer[past_last_idx + i]; // DBG + } + } + catch(std::exception &e) + { + auto err = "ROSCameraStream: The circular buffer index you are trying to access is out of bounds:\n"_s + e.what(); + ROS_WARN_THROTTLE_NAMED(1, "ROSCameraStream", "%s", err.c_str()); + return nullptr; + } + + return shared_ptr_to_const_frame; +} + +template <typename img_scalar_t, typename float_t> +void +ROSCameraStream<img_scalar_t, float_t>::_addFrame(CamFramePtr &new_frame_ptr) +{ + // Prevent accessing frames while new frames are being added + std::lock_guard<std::mutex> lock(_mtx); + this->_frame_ptr_circular_buffer.push_front(new_frame_ptr); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +template <typename img_scalar_t, typename float_t> +void ROSCameraStream<img_scalar_t, float_t>::_newFrameCb(const sensor_msgs::ImageConstPtr &image_msg_ptr) +{ + // Check if the topic name contains the string "rect" + bool rectified = _img_topic.find(std::string("rect")) != std::string::npos; + + // Create shared pointer to dynamically allocated Camera Frame object constructed from ros img msg + CamFramePtr new_frame_ptr{new CamFrame(image_msg_ptr, this->_cam_model_ptr, rectified, 1.0)}; + + // Add shared pointer to CameraFrame object to the circular buffer + _addFrame(new_frame_ptr); + + // Update the bounding timestamps for the frame sequence (new frames are pushed to the + // front of the buffer, so the oldest frame sits at the back) + this->_start_time = this->_frame_ptr_circular_buffer.back()->stamp(); + this->_end_time = image_msg_ptr->header.stamp; +} + +template <typename img_scalar_t, typename float_t> +ROSCameraStream<img_scalar_t, float_t>::~ROSCameraStream() +{ + img_sub.unsubscribe(); + _cb_queue.clear(); + _cb_queue.disable(); +} + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/image_filtering.hpp b/perception/mil_vision/include/mil_vision_lib/image_filtering.hpp new file mode 100644 index 00000000..3b7b08cf --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/image_filtering.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include + +#include +#include + +#include + +namespace mil_vision +{ + +/* + Returns a version of the input kernel rotated about its center point. + kernel - original kernel + theta - angle in radians by which to rotate the kernel. Positive angles --> counterclockwise. + deg - OPTIONAL. Will assume that theta is given in degrees if set to true. + no_expand - OPTIONAL. Will leave the output size the same as the input size. Parts of the + original kernel may fall outside the output canvas after the rotation. +*/ +cv::Mat rotateKernel(const cv::Mat &kernel, float theta, bool deg=false, bool no_expand=false); + +/* + Creates a new kernel that is rotationally invariant by averaging rotated instances of the + original. + kernel - original kernel + rotations - OPTIONAL. The number of times that the original kernel will be rotated before + the rotated versions are averaged together. The rotated kernels will be uniformly spread + angularly. +*/ +cv::Mat makeRotInvariant(const cv::Mat &kernel, int rotations=8);
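+// Example (illustrative sketch): building a rotation-invariant version of a
+// simple horizontal-edge kernel.
+//
+//   cv::Mat edge = (cv::Mat_<float>(3, 3) << -1, -1, -1, 0, 0, 0, 1, 1, 1);
+//   cv::Mat quarter_turn = rotateKernel(edge, 90.0f, true);  // theta in degrees
+//   cv::Mat invariant = makeRotInvariant(edge, 16);          // average 16 rotations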
+ +/* + Returns the minimum theta for which a version of the kernel that has been rotated + by theta radians will be approximately identical to the original. + kernel - input kernel + ang_res - OPTIONAL. The result will be a multiple of this angle + deg - OPTIONAL. The output will be in degrees instead of radians if set to true +*/ +float getRadialSymmetryAngle(const cv::Mat &kernel, float ang_res=0.1, bool deg=false); + +} // namespace mil_vision diff --git a/perception/mil_vision/include/mil_vision_lib/pcd_sub_pub_algorithm.hpp b/perception/mil_vision/include/mil_vision_lib/pcd_sub_pub_algorithm.hpp new file mode 100644 index 00000000..7ef4bd3e --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/pcd_sub_pub_algorithm.hpp @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include +#include +#include + + +namespace mil_vision{ + +template +class PcdSubPubAlgorithm{ + /* + virtual base class for algorithms that subscribe to point cloud ROS topics, + operate on the clouds and publish output clouds to a different topic + */ + +public: + + // Type aliases + template <typename T> using PCD = pcl::PointCloud<T>; + + // Constructors and Destructors + + PcdSubPubAlgorithm(ros::NodeHandle nh, std::string input_pcd_topic, std::string output_pcd_topic) + : _nh(nh), _input_pcd_topic(input_pcd_topic), _output_pcd_topic(output_pcd_topic) + { + // Subscribe to point cloud topic + _cloud_sub = _nh.subscribe>(_input_pcd_topic, 1, &PcdSubPubAlgorithm::_cloud_cb, this); + + // Advertise output topic + _cloud_pub = _nh.advertise>(_output_pcd_topic, 1, true); + } + + // Check status methods + + bool activated() + { + return _active; + } + + bool ok() + { + return _ok && ros::ok(); + } + + // Set status methods + + void switchActivation() + { + _active = !_active; + } + + +protected: + + // runs algorithm pipeline when a new pcd msg is received + virtual void _cloud_cb(const typename PCD::ConstPtr &cloud_msg) = 0; + + // Subscribing and storing input + ros::NodeHandle _nh; + std::string _input_pcd_topic; + ros::Subscriber _cloud_sub; + PCD _input_pcd2; + + // Storing result and publishing + std::string _output_pcd_topic; + ros::Publisher _cloud_pub; + PCD _output_pcd2; + + // Activation status + bool _active = false; + + // Error flag + bool _ok = false; + std::string _err_msg; +}; + +} // namespace mil_vision \ No newline at end of file
 diff --git a/perception/mil_vision/include/mil_vision_lib/point_cloud_algorithms.hpp b/perception/mil_vision/include/mil_vision_lib/point_cloud_algorithms.hpp new file mode 100644 index 00000000..295dddcf --- /dev/null +++ b/perception/mil_vision/include/mil_vision_lib/point_cloud_algorithms.hpp @@ -0,0 +1,70 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace mil_vision { + +class PcdSubPubAlgorithm{ + /* + virtual base class for algorithms that subscribe to point cloud ROS topics, + operate on the clouds and publish output clouds to a different topic + */ + typedef sensor_msgs::PointCloud2 PCD; + +public: + PcdSubPubAlgorithm(){}; + ~PcdSubPubAlgorithm(){}; + +protected: + virtual void cloud_cb(const PCD &cloud_msg ) = 0; // runs algorithm pipeline when a msg is received + PCD input_pcd; + PCD output_pcd; + ros::NodeHandle nh; + ros::Subscriber cloud_sub; + ros::Publisher cloud_pub; + std::string input_pcd_topic; + std::string output_pcd_topic; +}; + + +class PcdColorizer : public PcdSubPubAlgorithm{ + /* + This class adds color information to XYZ-only point clouds if the + points in the cloud can be observed by a camera that takes color images. + Note: needs accurate TF because it uses the ros tf package to transform + arbitrary point cloud frames into the frame of the color pinhole camera. + Note: the rgb camera topic should be streaming rectified images + */ + + typedef sensor_msgs::PointCloud2 PCD; + +public: + PcdColorizer(ros::NodeHandle nh, std::string input_pcd_topic, std::string output_pcd_topic, std::string rgb_cam_topic, std::string rgb_cam_frame); + ~PcdColorizer(){} + void _transform_to_cam(); + void _color_pcd(); + +private: + void cloud_cb(const sensor_msgs::PointCloud2 &cloud_msg); + std::string rgb_cam_frame; + std::string rgb_cam_topic; + tf::TransformListener tf_listener; + PCD transformed_pcd; // input pcd transformed to the frame of the rgb camera + image_transport::ImageTransport img_transport {nh}; + image_transport::CameraSubscriber rgb_cam_sub; + + int seq = 0; + + Eigen::Matrix3f cam_intrinsics; + bool _intrinsics_set = false; + sensor_msgs::ImageConstPtr latest_frame_img_msg; + sensor_msgs::CameraInfoConstPtr latest_frame_info_msg; +}; + +} // namespace mil_vision diff --git a/perception/mil_vision/object_classification/HOG_descriptor.py b/perception/mil_vision/object_classification/HOG_descriptor.py new file mode 100644 index 00000000..74e6f9d7 --- /dev/null +++ b/perception/mil_vision/object_classification/HOG_descriptor.py @@ -0,0 +1,10 @@ +import cv2 + + +class HOGDescriptor(object): + + def __init__(self): + self.hog = cv2.HOGDescriptor((8, 8), (8, 8), (4, 4), (8, 8), 9) + + def get_descriptor(self, img): + return self.hog.compute(img) diff --git a/perception/mil_vision/object_classification/SVM_classifier.py b/perception/mil_vision/object_classification/SVM_classifier.py new file mode 100644 index 00000000..90213ce5 --- /dev/null +++ b/perception/mil_vision/object_classification/SVM_classifier.py @@ -0,0 +1,24 @@ +from sklearn import svm +import pickle + + +class SVMClassifier(object): + + def __init__(self): + self.clf = svm.SVC(probability=True) + self.number = 0 + + def classify(self, desc): + desc = desc.reshape(1, len(desc)) + probs = self.clf.predict_proba(desc) + probs = list(probs.flatten()) + p = max(probs) + i = probs.index(p) + return i, p + + def train(self, desc, clss): + self.clf.fit(desc, clss) + + def pickle(self, name): + with open(name, 'wb') as f: + pickle.dump(self, f) diff --git a/perception/mil_vision/object_classification/__init__.py b/perception/mil_vision/object_classification/__init__.py new file mode 100644 index 00000000..af106f01 --- /dev/null +++ b/perception/mil_vision/object_classification/__init__.py @@ -0,0 +1,5 @@ +from lidar_to_image import LidarToImage +from SVM_classifier import SVMClassifier +from HOG_descriptor import HOGDescriptor +from object_classification import * +from depickler import depicklify \ No newline at end of file diff --git a/perception/mil_vision/object_classification/depickler.py b/perception/mil_vision/object_classification/depickler.py new file mode 100644 index 00000000..e7245858 --- /dev/null +++ b/perception/mil_vision/object_classification/depickler.py @@ -0,0 +1,10 @@ +import pickle +import sys +import SVM_classifier + + +def depicklify(filename): + sys.modules['SVM_classifier'] = SVM_classifier + cl = pickle.load(open(filename, 'rb')) + del sys.modules['SVM_classifier'] + return cl
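+# Example (illustrative sketch): a minimal train/classify round trip with the
+# classes above. Training images and labels are hypothetical.
+#
+#   desc = HOGDescriptor()
+#   clf = SVMClassifier()
+#   X = [desc.get_descriptor(img).flatten() for img in training_images]
+#   y = [0, 1, 0, 1]  # one class index per training image
+#   clf.train(X, y)
+#   label, prob = clf.classify(desc.get_descriptor(test_image).flatten())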
 diff --git a/perception/mil_vision/object_classification/depicklify.py b/perception/mil_vision/object_classification/depicklify.py new file mode 100644 index 00000000..577a7c16 --- /dev/null +++ b/perception/mil_vision/object_classification/depicklify.py @@ -0,0 +1,12 @@ +import pickle + + +class Depickle(object): + + def __init__(self): + mypickle = pickle.load(open("/home/tess/bags/color_train.py", 'rb')) + file = open('rois.txt', 'w') + for m in mypickle.bag_to_rois: + file.write(m) + for f in mypickle.bag_to_rois[m]: + file.write(str(f)) diff --git a/perception/mil_vision/object_classification/lidar_to_image.py b/perception/mil_vision/object_classification/lidar_to_image.py new file mode 100644 index 00000000..8b79bff5 --- /dev/null +++ b/perception/mil_vision/object_classification/lidar_to_image.py @@ -0,0 +1,197 @@ +import txros +from twisted.internet import defer +from txros import util, tf +import navigator_tools as nt +from navigator_tools import CvDebug +from collections import Counter +from image_geometry import PinholeCameraModel +import sys +from collections import deque +from cv_bridge import CvBridge +from nav_msgs.msg import Odometry +from sensor_msgs.msg import Image, CameraInfo +import genpy +import cv2 +from mil_ros_tools import odometry_to_numpy +from navigator_msgs.srv import ObjectDBQuery, ObjectDBQueryRequest +import numpy as np +___author___ = "Tess Bianchi" + +''' +Needs to be refactored to be generic and not depend on navigator +''' + +class LidarToImage(object): + + def __init__(self, nh, classes=None, dist=50): + self.MAX_SIZE = 74 + self.IMAGE_SIZE = 100 + self.max_dist = dist + self.bridge = CvBridge() + self.nh = nh + self.image_cache = deque() + self.pose = None + self.image = None + self.classes = classes + self.cam_info = None + self.busy = False + self.c = 0 + + self.debug = CvDebug(nh) + + @util.cancellableInlineCallbacks + def init_(self, cam): + image_sub = "/stereo/right/image_rect_color" + cam_info = "/stereo/right/camera_info" + + yield self.nh.subscribe(image_sub, Image, self._img_cb) + self._database = yield self.nh.get_service_client('/database/requests', ObjectDBQuery) + self._odom_sub = yield self.nh.subscribe('/odom', Odometry, + lambda odom: setattr(self, 'pose', odometry_to_numpy(odom)[0])) + self.cam_info_sub = yield self.nh.subscribe(cam_info, CameraInfo, self._info_cb) + self.tf_listener = tf.TransformListener(self.nh) + defer.returnValue(self) + + def _info_cb(self, info): + self.cam_info = info + # Keep a pinhole camera model in sync with the latest camera info for projections + self.camera_model = PinholeCameraModel() + self.camera_model.fromCameraInfo(info) + + def _get_2d_points(self, points_3d): + # xmin, ymin, zmin = self._get_top_left_point(points_3d) + points_2d = map(lambda x: self.camera_model.project3dToPixel(x), points_3d) + return points_2d + + def _get_bounding_rect(self, points_2d, img): + xmin = np.inf + xmax = -np.inf + ymin = np.inf + ymax = -np.inf + h, w, r = img.shape + for i, point in enumerate(points_2d): + if(point[0] < xmin): + xmin = point[0] + if(point[0] > xmax): + xmax = point[0] + if(point[1] > ymax): + ymax = point[1] + if(point[1] < ymin): + ymin = point[1] + if xmin < 0: + xmin = 1 + if ymin < 0: + ymin = 1 + if xmax > w: + xmax = w - 1 + if ymax > h: + ymax = h - 1 + return xmin, ymin, xmax, ymax
+ + @util.cancellableInlineCallbacks + def get_object_rois(self, name=None): + req = ObjectDBQueryRequest() + if name is None: + req.name = 'all' + else: + req.name = name + obj = yield self._database(req) + + if obj is None or not obj.found: + defer.returnValue((None, None)) + rois = [] + ros_img = yield self._get_closest_image(obj.objects[0].header.stamp) + if ros_img is None: + defer.returnValue((None, None)) + img = self.bridge.imgmsg_to_cv2(ros_img, "bgr8") + o = obj.objects[0] + + points_3d = yield self.get_3d_points(o) + points_2d_all = map(lambda x: self.camera_model.project3dToPixel(x), points_3d) + points_2d = self._get_2d_points(points_3d) + xmin, ymin, xmax, ymax = self._get_bounding_rect(points_2d, img) + xmin, ymin, xmax, ymax = int(xmin), int(ymin), int(xmax), int(ymax) + h, w, r = img.shape + if xmin < 0 or xmax < 0 or xmin > w or xmax > w or xmax - xmin == 0 or ymax - ymin == 0: + defer.returnValue((None, None)) + if ymin < 0: + ymin = 0 + roi = img[ymin:ymax, xmin:xmax] + roi = self._resize_image(roi) + ret_obj = {} + ret_obj["id"] = o.id + ret_obj["points"] = points_2d_all + ret_obj["img"] = roi + ret_obj["time"] = o.header.stamp + ret_obj["name"] = o.name + ret_obj["bbox"] = [xmin, ymin, xmax, ymax] + rois.append(ret_obj) + defer.returnValue((img, rois)) + + def _img_cb(self, image_ros): + """Add an image to the image cache.""" + self.image = image_ros + + if len(self.image_cache) > 100: + self.image_cache.popleft() + + self.image_cache.append(image_ros) + + @util.cancellableInlineCallbacks + def _get_closest_image(self, time): + min_img = None + for i in range(0, 20): + min_diff = genpy.Duration(sys.maxint) + for img in self.image_cache: + diff = abs(time - img.header.stamp) + if diff < min_diff: + min_diff = diff + min_img = img + if min_img is not None: + defer.returnValue(min_img) + yield self.nh.sleep(.3) + + def _resize_image(self, img): + h, w, r = img.shape + if h > w: + nh = self.MAX_SIZE + nw = nh * w / h + else: + nw = self.MAX_SIZE + nh = nw * h / w + img = cv2.resize(img, (nw, nh)) + # return img + rep = np.ones(nw, dtype=np.int64) + reph = np.ones(nh, dtype=np.int64) + empty_slots = self.IMAGE_SIZE - nw + empty_slots_h = self.IMAGE_SIZE - nh + half_empty_slots = empty_slots / 2 + 1 + half_empty_slots_h = empty_slots_h / 2 + 1 + reph[0] = half_empty_slots_h + reph[-1] = half_empty_slots_h + rep[0] = half_empty_slots + rep[-1] = half_empty_slots + if empty_slots % 2 == 1: + rep[-1] += 1 + + if empty_slots_h % 2 == 1: + reph[-1] += 1 + + img = np.repeat(img, reph, axis=0) + return np.repeat(img, rep, axis=1) + + @txros.util.cancellableInlineCallbacks + def get_3d_points(self, perc_obj): + trans = yield self.tf_listener.get_transform("/stereo_right_cam", "/enu", perc_obj.header.stamp) + + stereo_points = [] + for point in perc_obj.points: + stereo_points.append(np.array([point.x, point.y, point.z, 1])) + stereo_points = map(lambda x: trans.as_matrix().dot(x), stereo_points) + points = [] + for p in stereo_points: + if p[3] < 1E-15: + raise ZeroDivisionError + p[0] /= p[3] + p[1] /= p[3] + p[2] /= p[3] + points.append(p[:3]) + defer.returnValue(points)
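+# Example (illustrative sketch, txros-style): pulling ROIs for a named database
+# object. Assumes a running txros NodeHandle "nh"; the object name is hypothetical.
+#
+#   @util.cancellableInlineCallbacks
+#   def demo(nh):
+#       l2i = yield LidarToImage(nh).init_(None)
+#       img, rois = yield l2i.get_object_rois(name="totem")
+#       if rois is not None:
+#           for r in rois:
+#               print r["name"], r["bbox"]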
 diff --git a/perception/mil_vision/object_classification/median_flow.py b/perception/mil_vision/object_classification/median_flow.py new file mode 100644 index 00000000..7d9ed1ca --- /dev/null +++ b/perception/mil_vision/object_classification/median_flow.py @@ -0,0 +1,209 @@ +import cv2 +import numpy as np +from collections import deque +import itertools +___author___ = "Tess Bianchi" + +class MedianFlow(object): + TRACKING_LENGTH = 3 + + def __init__(self, elimination_amount=.6, winSize=(15, 15), maxLevel=2): + self.prev_points = None + self.prev_frame = None + self.lk_params = dict(winSize=winSize, maxLevel=maxLevel, criteria=( + cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 2, 0.03)) + self.tracking_points = deque() + self.tracking_frames = deque() + self.bboxs = deque() + + self.elimination_amount = elimination_amount + self._is_effective = True + + self.bbox = None + self._curr_scale = 1 + self._amount_mdn_flow_tracked = 0 + + def init(self, frame, bbox, img=None, num_points=6): + ''' + Arguments: num_points is the square root of the number of tracking points that will + initially be placed on the image + ''' + if frame is None: + raise TypeError("The frame is invalid") + + x, y, w, h = bbox + + if(w <= 0 or h <= 0): + raise ValueError("Invalid bounding box") + + self.prev_frame = frame + + self.bbox = bbox + + px = np.linspace(x, x + w, num_points) + py = np.linspace(y, y + h, num_points) + px = px.astype(np.int32) + py = py.astype(np.int32) + p = np.array(list(itertools.product(px, py)), dtype=np.float32) + p = p.reshape(p.shape[0], 2) + + self.prev_points = p + + def stop_use(self): + self.tracking_points.clear() + self.tracking_frames.clear() + self.bboxs.clear() + self._amount_mdn_flow_tracked = 0 + self._is_effective = True + self._curr_scale = 1 + + def is_effective(self): + return self._is_effective + + def get_last_good_frame(self): + best = self.bboxs.popleft() + best_frame = self.tracking_frames.popleft() + x, y, w, h = best + _roi = best_frame[y:y + h, x:x + w] + return _roi, best + + def _calculate_forward_backward_error(self, prev_frame, prev_points): + # Take the most recent frames in the list and calculate optical flow backwards, + # from f(t) to f(t-TRACKING_LENGTH), for each frame in tracking_frames. + # Go backwards through the list because we want the most recent element first + for i in range(self.TRACKING_LENGTH - 1, -1, -1): + _frame = self.tracking_frames[i] + _points, status, err = cv2.calcOpticalFlowPyrLK(prev_frame, _frame, prev_points, None, **self.lk_params) + prev_frame = _frame + prev_points = _points + + # We now have f(t-TRACKING_LENGTH), get the euclidean distance from each + # of these points to the oldest points in tracking_points + old_points = self.tracking_points.popleft() + diff = np.subtract(old_points, prev_points) + diff_norm = np.linalg.norm(diff, axis=1) + + return diff, diff_norm
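+    # Worked example (illustrative): a point first seen at (10, 10) that tracks
+    # forward and then back through the buffer to (13, 14) has forward-backward
+    # error np.linalg.norm([3, 4]) == 5.0, making it a likely candidate for
+    # elimination below.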
+ + def _eliminate_points(self, points, frame): + if frame is None: + raise TypeError("The frame is invalid") + + # Make sure there are enough frames in our list + if(len(self.tracking_points) < self.TRACKING_LENGTH): + self.tracking_points.append(points) + self.tracking_frames.append(frame) + return points + + diff, diff_norm = self._calculate_forward_backward_error(frame, points) + # Eliminate the points based on the fb error + msk = None + + # If this is the first time median flow is tracking these points, eliminate the top 60% of the points with the highest + # fb tracking error (60% is defined in self.elimination_amount) + if(self._amount_mdn_flow_tracked == 0): + lrgst_ind = np.argsort(diff_norm)[-int(len(diff_norm) * self.elimination_amount):] + msk = np.ones(len(diff_norm), dtype=bool) + msk[lrgst_ind] = False + else: + # Eliminate points with a forward-backward error greater than .8 + high_val = diff_norm > .8 + diff = diff.T[0] + msk = np.ones(len(diff_norm), dtype=bool) + msk[high_val] = False + + # Eliminate these from the current points, and from all previous points as well + points = points[msk] + for i, tp in enumerate(self.tracking_points): + self.tracking_points[i] = tp[np.array(msk)] + + self.prev_points = self.tracking_points[self.TRACKING_LENGTH - 2] + + # Update tracking values + self.tracking_points.append(points) + self.tracking_frames.popleft() + self.tracking_frames.append(frame) + self._amount_mdn_flow_tracked += 1 + + # Determine if this model is still effective. + # If the mean fb error is > 2 and the number of frames tracked is > 20, then the model has failed. + # Also, if fewer than 3 points remain in the model, it has failed. + if((np.mean(diff_norm) > 2 and self._amount_mdn_flow_tracked > 20) or len(points) < 3): + self._is_effective = False + + return points + + def _update_bbox(self, curr_points): + # Update bounding dimensions + # Get the median dx and dy, this is how far the bounding box moves + mydiff = np.subtract(curr_points, self.prev_points) + a = mydiff.transpose() + x_d = np.median(a[0]) + y_d = np.median(a[1]) + x, y, w, h = self.bbox + + x_n = int(round(x + x_d)) + y_n = int(round(y + y_d)) + + # Scale is a little trickier, get every permutation of two points from previous points and current points + prev_comb = list(itertools.permutations(self.prev_points, 2)) + curr_comb = list(itertools.permutations(curr_points, 2)) + ratios = [] + + for i, val in enumerate(prev_comb): + if(i >= len(curr_comb)): + # This should never happen + raise ValueError('The previous points and current points are not synchronized') + prev_point_A = prev_comb[i][0] + prev_point_B = prev_comb[i][1] + curr_point_A = curr_comb[i][0] + curr_point_B = curr_comb[i][1] + + # Get the distance between every corresponding pair of points + prev_dist = np.subtract(prev_point_A, prev_point_B) + curr_dist = np.subtract(curr_point_A, curr_point_B) + + # Don't want to divide by zero + if(np.linalg.norm(prev_dist) == 0): + continue + + # Get the ratio of the current distance to the previous distance + ratios.append(np.linalg.norm(curr_dist) / np.linalg.norm(prev_dist)) + + # Choose the median of these ratios + scale = np.median(ratios) + + # Multiply the current scale with this scale + self._curr_scale *= scale + + w_n = w + h_n = h + + # Only apply the scale once it has drifted far enough from 1 (avoids compounding rounding errors) + if(self._curr_scale > 1.08 or self._curr_scale < .92): + w_n = int(round(w * self._curr_scale)) + h_n = int(round(h * self._curr_scale)) + self._curr_scale = 1 + + if(x_n < 0 or y_n < 0 or w_n < 0 or h_n < 0): + raise ValueError("The new bounding box has incorrect values") + + self.bbox = (x_n, y_n, w_n, h_n) + + def track(self, frame): + if frame is None: + raise TypeError("The frame is invalid") + + points, status, err = cv2.calcOpticalFlowPyrLK(self.prev_frame, frame, self.prev_points, None, **self.lk_params) + points = self._eliminate_points(points, frame) + try: + self._update_bbox(points) + except ValueError: + return None + + self.bboxs.append(self.bbox) + if(len(self.bboxs) > self.TRACKING_LENGTH): + self.bboxs.popleft() + + self.prev_points = points + self.
prev_frame = frame + return self.bbox \ No newline at end of file diff --git a/perception/mil_vision/object_classification/object_classification.py b/perception/mil_vision/object_classification/object_classification.py new file mode 100644 index 00000000..0b16b603 --- /dev/null +++ b/perception/mil_vision/object_classification/object_classification.py @@ -0,0 +1,150 @@ + +from roi_generator import ROI_Collection +from mil_ros_tools import CvDebug, BagCrawler +import numpy as np +from HOG_descriptor import HOGDescriptor +from SVM_classifier import SVMClassifier +import pickle +import cv2 +from cv_bridge import CvBridge +___author___ = "Tess Bianchi" + + +class Config(object): + + def __init__(self): + self.classes = ["totem", "scan_the_code", "nothing", "shooter"] + self.classifier = SVMClassifier() + self.descriptor = HOGDescriptor() + self.bridge = CvBridge() + self.MAX_SIZE = 74 + self.IMAGE_SIZE = 100 + + def to_class(self, val): + return self.classes[val] + + def to_val(self, clss): + for i, c in enumerate(self.classes): + if c in clss: + return i + + def get_imgs(self, val): + roi = pickle.load(open(val, 'rb')) + imgs = [] + rois_vals = [] + rois = [] + print roi + + for b in roi.bag_to_rois.keys(): + frames = roi.bag_to_rois[b] + bc = BagCrawler(b) + topic = bc.image_topics[0] + bc_crawl = bc.crawl(topic) + print b + for frame in frames: + img = bc_crawl.next() + img = self.bridge.imgmsg_to_cv2(img, 'bgr8') + imgs.append(img) + a = [] + for clss in frame: + r = frame[clss] + myroi = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] + myroi = self._resize_image(myroi) + clss = self.to_val(clss) + rois.append((myroi, clss)) + a.append((r, myroi, clss)) + rois_vals.append(a) + return imgs, rois_vals, rois + + def _resize_image(self, img): + h, w, r = img.shape + if h > w: + nh = self.MAX_SIZE + nw = nh * w / h + else: + nw = self.MAX_SIZE + nh = nw * h / w + img = cv2.resize(img, (nw, nh)) + # return img + rep = np.ones(nw, dtype=np.int64) + reph = np.ones(nh, dtype=np.int64) + emtpy_slots = self.IMAGE_SIZE - nw + empty_slots_h = self.IMAGE_SIZE - nh + half_empty_slots = emtpy_slots / 2 + 1 + half_empty_slots_h = empty_slots_h / 2 + 1 + reph[0] = half_empty_slots_h + reph[-1] = half_empty_slots_h + rep[0] = half_empty_slots + rep[-1] = half_empty_slots + if emtpy_slots % 2 == 1: + rep[-1] += 1 + + if empty_slots_h % 2 == 1: + reph[-1] += 1 + + img = np.repeat(img, reph, axis=0) + return np.repeat(img, rep, axis=1) + + +class Training(object): + + def __init__(self, roi_file, output): + self.config = Config() + self.output = output + self.roi_file = roi_file + + def train(self): + descs = [] + classify = [] + imgs, roi_val, rois = self.config.get_imgs(self.roi_file) + for r in rois: + roi, clss = r + desc = self.config.descriptor.get_descriptor(roi) + desc = desc.flatten() + descs.append(desc) + classify.append(clss) + print clss + descs = np.array(descs) + classify = np.array(classify) + counts = dict((x, list(classify).count(x)) for x in set(classify)) + counts = dict((self.config.to_class(k), v) for k, v in counts.items()) + print counts + + self.config.classifier.train(descs, classify) + self.config.classifier.pickle("train.p") + +# class Classifier(object): + + +class ClassiferTest(object): + + def __init__(self, roi_file, class_file): + self.config = Config() + self.roi_file = roi_file + self.classifier = pickle.load(open(class_file, 'rb')) + self.debug = CvDebug() + + def classify(self): + print self.roi_file + imgs, roi_val, rois = self.config.get_imgs(self.roi_file) + for i, frames in 
enumerate(roi_val): + img = imgs[i] + draw = img.copy() + for roi in frames: + myroi, roi_img, tru_clss = roi + desc = self.config.descriptor.get_descriptor(roi_img) + desc = desc.flatten() + clss, prob = self.classifier.classify(desc) + clss = self.config.to_class(clss) + cv2.rectangle(draw, (myroi[0], myroi[1]), (myroi[0] + myroi[2], myroi[1] + myroi[3]), (0, 0, 255)) + cv2.putText(draw, clss + ": " + str(prob), (myroi[0], myroi[1]), 1, 1.0, (0, 255, 0)) + # self.debug.add_image(draw, topic="roi") + cv2.imshow("roi", draw) + cv2.waitKey(33) + +if __name__ == "__main__": + t = Training("roi_competition.p", "train_competition.p") + t.train() + print "done" + # c = ClassiferTest("val_roi.p", "train.p") + # c.classify() diff --git a/perception/mil_vision/object_classification/roi_generator.py b/perception/mil_vision/object_classification/roi_generator.py new file mode 100644 index 00000000..3980a7a1 --- /dev/null +++ b/perception/mil_vision/object_classification/roi_generator.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +from mil_ros_tools import BagCrawler +import argparse +from cv_bridge import CvBridge +import cv2 +import sys +import pickle +import os +import numpy as np +from median_flow import MedianFlow +___author___ = "Tess Bianchi" + + +class ROI_Collection(): + + def __init__(self): + self.bag_to_rois = {} + + def pickle(self, name): + pickle.dump(self, open(name, "wb")) + + +class ROI_Generator(object): + + def __init__(self): + self.folder = os.path.dirname(os.path.realpath(__file__)) + self.bridge = CvBridge() + self.roi_to_tracker = {} + + def get_roi(self, name): + file = open(self.folder + '/' + name, 'r') + for line in file: + line = line.replace('\n', '') + if len(line) == 0: + continue + x, y, w, h = line.split(" ") + yield x, y, w, h + + def create(self, bag, output, load): + self.rects = {} + self.sel_rect = None + bc = BagCrawler(bag) + self.rclk = False + self.lclk = False + topic = bc.image_topics[0] + self.crawl_bu = bc.crawl(topic=topic) + image = self.crawl_bu.next() + self.image = self.bridge.imgmsg_to_cv2(image, 'bgr8') + self.crawl = bc.crawl(topic=topic) + self.x, self.y = 0, 0 + self.rsel = True + self.output = output + w, h, r = self.image.shape + self.button_pressed = False + if load: + self.collection = pickle.load(open(self.folder + '/' + output, "rb")) + else: + self.collection = ROI_Collection() + + self.collection.bag_to_rois[bag] = [] + self.mycoll = self.collection.bag_to_rois[bag] + + self.window_name = 'segment' + cv2.namedWindow(self.window_name) + cv2.setMouseCallback(self.window_name, self.mouse_roi) + self.last_rec = None + + def setw(x): + if self.sel_rect is not None: + r = self.rects[self.sel_rect] + r[2] = x + + def seth(x): + if self.sel_rect is not None: + r = self.rects[self.sel_rect] + r[3] = x + + cv2.createTrackbar("width", self.window_name, 0, w, setw) + cv2.createTrackbar("height", self.window_name, 0, h, seth) + + def out_range(self, bbox): + h, w, r = self.image.shape + if bbox[0] < 0 or bbox[0] + bbox[2] > w: + return True + if bbox[1] < 0 or bbox[1] + bbox[3] > h: + return True + return False + + def go(self): + while self.x is None: + cv2.waitKey(33) + image = self.crawl.next() + self.image = self.bridge.imgmsg_to_cv2(image, 'bgr8') + doing = False + pause = True + while True: + if doing: + continue + doing = True + k = chr(cv2.waitKey(50) & 0xFF) + if k == 'q': + break + elif k == ' ': + pause = not pause + elif not pause and not self.rclk: + try: + image = self.crawl.next() + self.image = self.bridge.imgmsg_to_cv2(image, 'bgr8') + 
except StopIteration: + break + + remove = [] + for name in self.rects: + bbox = self.roi_to_tracker[name].track(self.image) + if bbox is None or self.out_range(bbox): + remove.append(name) + else: + self.rects[name] = [bbox[0], bbox[1], bbox[2], bbox[3]] + for name in remove: + self.rects.pop(name) + self.roi_to_tracker.pop(name) + r = dict(self.rects) + self.mycoll.append(r) + clone = self.image.copy() + for key in self.rects.keys(): + r = self.rects[key] + color = (255, 0, 0) + if key == self.sel_rect: + color = (0, 255, 0) + + cv2.rectangle(clone, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]), color, 2) + cv2.putText(clone, key, (r[0], r[1]), 1, 1.0, (255, 0, 0)) + + cv2.imshow(self.window_name, clone) + doing = False + self.collection.pickle(self.output) + + def mouse_roi(self, event, x, y, flags, params): + if event == cv2.EVENT_RBUTTONDOWN: + self.rclk = not self.rclk + self.sel_rect = None + for name in self.rects: + r = self.rects[name] + self.roi_to_tracker[name] = MedianFlow() + self.roi_to_tracker[name].init(self.image, r) + return + if self.rclk: + if event == cv2.EVENT_LBUTTONDOWN and flags == 48: # 16: # pressing shift, remove box + if len(self.rects) > 0: + r = min(self.rects.items(), key=lambda rect: np.linalg.norm( + np.array([rect[1][0], rect[1][1]]) - np.array([x, y]))) + r = r[0] + self.rects.pop(r) + self.roi_to_tracker.pop(r) + self.sel_rect = None + elif event == cv2.EVENT_LBUTTONDOWN and flags == 40: # 8: # pressing ctrl, add box + name = raw_input('Enter name of object: ') + if name == "skip": + return + r = [20, 20, 20, 20] + self.rects[name] = r + elif event == cv2.EVENT_LBUTTONDOWN: + self.lclk = not self.lclk + if not self.lclk: + if self.sel_rect is not None: + r = self.rects[self.sel_rect] + self.sel_rect = None + else: + if len(self.rects) > 0: + self.sel_rect = min(self.rects.items(), key=lambda rect: np.linalg.norm( + np.array([rect[1][0], rect[1][1]]) - np.array([x, y]))) + self.sel_rect = self.sel_rect[0] + r = self.rects[self.sel_rect] + cv2.setTrackbarPos("width", self.window_name, r[2]) + cv2.setTrackbarPos("height", self.window_name, r[3]) + + elif event == cv2.EVENT_MOUSEMOVE: + self.x, self.y = x, y + if self.sel_rect is not None: + r = self.rects[self.sel_rect] + self.rects[self.sel_rect][0:4] = [self.x, self.y, r[2], r[3]] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('bag', type=str, help='The bag you would like to use') + parser.add_argument('name', type=str, help='The name of the output file') + parser.add_argument('--load', action='store_true', help='Load an existing output file and append to it') + args = parser.parse_args(sys.argv[1:]) + + roi = ROI_Generator() + roi.create(args.bag, args.name, args.load) + roi.go() diff --git a/perception/mil_vision/package.xml b/perception/mil_vision/package.xml new file mode 100644 index 00000000..708fcf4d --- /dev/null +++ b/perception/mil_vision/package.xml @@ -0,0 +1,38 @@ +<?xml version="1.0"?> +<package> + <name>mil_vision</name> + <version>2.0.0</version> + <description>Nodes and libraries used for computer vision perception on NaviGator</description> + + <maintainer>David Soto</maintainer> + <license>MIT</license> + <buildtool_depend>catkin</buildtool_depend> + <build_depend>roscpp</build_depend> + <run_depend>roscpp</run_depend> + <build_depend>rospy</build_depend> + <run_depend>rospy</run_depend> + <build_depend>image_transport</build_depend> + <run_depend>image_transport</run_depend> + <build_depend>image_geometry</build_depend> + <run_depend>image_geometry</run_depend> + <build_depend>cv_bridge</build_depend> + <run_depend>cv_bridge</run_depend> + <build_depend>mil_msgs</build_depend> + <run_depend>mil_msgs</run_depend> + <build_depend>mil_tools</build_depend> + <run_depend>mil_tools</run_depend> + <build_depend>message_generation</build_depend> + <run_depend>message_runtime</run_depend> + <build_depend>std_msgs</build_depend> + <run_depend>std_msgs</run_depend> + <build_depend>geometry_msgs</build_depend> + <run_depend>geometry_msgs</run_depend> + <build_depend>sensor_msgs</build_depend> + <run_depend>sensor_msgs</run_depend> + <run_depend>libpcl-all</run_depend> + <build_depend>tf2_sensor_msgs</build_depend> + <run_depend>tf2_sensor_msgs</run_depend> + <run_depend>tf2_geometry_msgs</run_depend> + <build_depend>pcl_ros</build_depend> + <run_depend>pcl_ros</run_depend> +</package>
 diff --git a/perception/mil_vision/ros_tools/easy_thresh.py b/perception/mil_vision/ros_tools/easy_thresh.py new file mode 100755 index 00000000..584143f8 --- /dev/null +++ b/perception/mil_vision/ros_tools/easy_thresh.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# PYTHON_ARGCOMPLETE_OK + +import argcomplete +import sys +import argparse +import rospy + +all_topics = rospy.get_published_topics() +topics = [topic[0] for topic in all_topics if topic[1] == 'sensor_msgs/Image'] + +usage_msg = ("Name an image topic, we'll subscribe to it and grab the first image we can. " + "Then click a rectangle around the area of interest") +desc_msg = "A tool for making threshold determination fun!" + +parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) +parser.add_argument(dest='topic_name', + help="The topic name you'd like to listen to", + choices=topics) +parser.add_argument('--hsv', action='store_true', + help="Would you like to look at hsv instead of bgr?" + ) + +argcomplete.autocomplete(parser) +args = parser.parse_args(sys.argv[1:]) +if args.hsv: + print 'Using HSV instead of BGR' +prefix = 'hsv' if args.hsv else 'bgr' + +# Importing these late so that argcomplete can run quickly +# NEEDS TO NOT BE DEPENDENT ON sub8 SOON. MOVE THESE FUNCTIONS TO MIL_VISION +from sub8_vision_tools import visual_threshold_tools # noqa +from mil_ros_tools.image_helpers import Image_Subscriber # noqa +import cv2 # noqa +import numpy as np # noqa +import os # noqa +from sklearn import cluster # noqa +os.environ['ETS_TOOLKIT'] = 'qt4' + + +class Segmenter(object): + def __init__(self): + self.is_done = False + self.corners = [] + self.state = 0 + + def mouse_cb(self, event, x, y, flags, param): + if event == cv2.EVENT_LBUTTONDOWN: + if not self.is_done: + print 'click' + self.corners.append(np.array([x, y])) + self.state += 1 + if self.state >= 4: + print 'done' + self.is_done = True + self.state = 0 + + if event == cv2.EVENT_RBUTTONDOWN: + pass + + def segment(self): + while(not self.is_done and not rospy.is_shutdown()): + if cv2.waitKey(50) & 0xFF == ord('q'): + break + + self.is_done = False + rect = cv2.minAreaRect(np.array([np.array(self.corners)])) + box = cv2.cv.BoxPoints(rect) + box = np.int0(box) + return box + + +class ImageGetter(object): + def __init__(self, topic_name='/down/left/image_raw'): + self.sub = Image_Subscriber(topic_name, self.get_image) + + print 'getting topic', topic_name + self.frame = None + self.done = False + + def get_image(self, msg): + self.frame = msg + self.done = True + + def wait_for_image(self): + while not self.done and not rospy.is_shutdown(): + if cv2.waitKey(50) & 0xFF == ord('q'): + exit() + + +if __name__ == '__main__': + rospy.init_node('easy_thresh') + + # Do the import after arg parse + + ig = ImageGetter(args.topic_name) + ig.wait_for_image() + print 'Got image' + frame_initial = np.copy(ig.frame) + + cv2.namedWindow("color") + seg = Segmenter() + cv2.setMouseCallback("color", seg.mouse_cb) + + frame_unblurred = frame_initial[::2, ::2, :] + frame = frame_unblurred + + if args.hsv: + analysis_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) + else: + analysis_image = frame + + draw_image = np.copy(frame_unblurred) + seg_image = np.zeros_like(frame_unblurred[:, :, 0]) + + cv2.imshow("color", frame_unblurred) + + box = seg.segment() + print 'finished' + + cv2.drawContours(seg_image, [box], 0, 1, -2) + + hsv_in_box = analysis_image[seg_image.astype(np.bool)] + hsv_list = np.reshape(hsv_in_box, (-1, 3)) + + clust = cluster.KMeans(n_clusters=2) + clust.fit(hsv_list) + print 'done clustering' + + hsv_dsamp = hsv_list +
+    labels_dsamp = clust.labels_
+
+    visual_threshold_tools.points_with_labels(
+        hsv_dsamp[:, 0],
+        hsv_dsamp[:, 1],
+        hsv_dsamp[:, 2],
+        labels_dsamp,
+        scale_factor=5.0,
+    )
+
+    thresholder = visual_threshold_tools.make_extent_dialog(
+        ranges=visual_threshold_tools.color_ranges[prefix],
+        image=analysis_image
+    )
+
+    while not rospy.is_shutdown():
+        if cv2.waitKey(50) & 0xFF == ord('q'):
+            break
+
+        ranges = thresholder.ranges
+        labels = visual_threshold_tools.np_inrange(hsv_dsamp, ranges[:, 0], ranges[:, 1])
+
+        # Print out thresholds that can be put in the configuration yaml
+        low = ranges[:, 0]
+        print ' {}_low: [{}, {}, {}]'.format(prefix, low[0], low[1], low[2])
+
+        high = ranges[:, 1]
+        print ' {}_high: [{}, {}, {}]'.format(prefix, high[0], high[1], high[2])
+
+        print 'np.' + repr(ranges)
+
+    cv2.destroyAllWindows()
diff --git a/perception/mil_vision/ros_tools/image_dumper.py b/perception/mil_vision/ros_tools/image_dumper.py
new file mode 100755
index 00000000..317db72a
--- /dev/null
+++ b/perception/mil_vision/ros_tools/image_dumper.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+
+import os
+import re
+import sys
+import fnmatch
+import argparse
+from tqdm import tqdm
+
+import cv2
+import rospy
+import roslib
+import rosbag
+
+from cv_bridge import CvBridge, CvBridgeError
+
+# encoding=utf8
+reload(sys)
+sys.setdefaultencoding('utf8')
+
+
+class ImageHandler:
+    def __init__(self, filepath, savepath, bag):
+        self.bag = bag
+        self.msgs = 0  # Used to show remaining frames
+        self.image_index = 0
+        self.image_topics = []
+        self.bridge = CvBridge()
+        self.bagname = filepath.split('/')[-1]  # ../../bagname.bag -> bagname.bag
+
+        if savepath is not None:
+            # Alternative save path (i.e. want to save to ext. drive)
+            if savepath[-1] != '/':
+                savepath += '/'
+            self.working_dir = savepath + self.bagname.split('.')[0] + '_images'
+        else:
+            # Save path defaults to launch path
+            self.working_dir = filepath.split('.')[0] + '_images'
+
+        if not os.path.exists(self.working_dir):
+            os.makedirs(self.working_dir)
+
+        print '\033[1m' + self.bagname + ':\033[0m'
+        print 'Saving images to: ', self.working_dir
+
+        self.parse_bag()
+        self.save_images()
+
+    def parse_bag(self):
+        types = []
+        bag_msg_cnt = []
+        topic_status = False
+        topics = self.bag.get_type_and_topic_info()[1].keys()
+
+        for i in range(0, len(self.bag.get_type_and_topic_info()[1].values())):
+            types.append(self.bag.get_type_and_topic_info()[1].values()[i][0])
+            bag_msg_cnt.append(self.bag.get_type_and_topic_info()[1].values()[i][1])
+
+        topics = zip(topics, types, bag_msg_cnt)
+
+        for topic, type, count in topics:
+            # Want to ignore image topics other than /image_raw
+            # TODO: Make this changeable
+            match = re.search(r'mono|rect|theora|color', topic)
+            if match:
+                pass
+            elif type == 'sensor_msgs/Image':
+                if topic_status is False:
+                    print 'Topic(s):'
+                    topic_status = True
+                print ' ' + topic
+                self.image_topics.append(topic)
+                self.msgs = self.msgs + count
+
+    def save_images(self):
+        # TODO: Add ability to pick up where it last left off
+        with tqdm(total=self.msgs) as pbar:
+            for topic, msg, t in self.bag.read_messages(
+                    topics=self.image_topics):
+                try:
+                    cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
+                    cv2.imwrite(self.working_dir + '/' +
+                                str(self.image_index) + '.png', cv_image)
+                    self.image_index = self.image_index + 1
+                except CvBridgeError, e:
+                    print e
+                pbar.update(1)
+        self.bag.close()
+        # image_index was already incremented once per saved frame, so it is
+        # the count itself
+        print self.image_index, 'images saved to', self.working_dir, '\n'
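+
+# Example usage (paths are illustrative):
+#   rosrun mil_vision image_dumper.py -f ~/bags
+# dumps the frames of every bag found under ~/bags into a folder next to
+# each bag, while
+#   rosrun mil_vision image_dumper.py -f ~/bags -s /media/ext_drive/dumps
+# sends the PNGs to an external drive instead.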
+
+if __name__ == '__main__':
+
+    desc_msg = ('Automated tool that traverses a directory dumping all of '
+                'the images from all non-rectified, non-mono, and non-'
+                'theora topics found in all of the ROS bag files encountered.')
+
+    parser = argparse.ArgumentParser(description=desc_msg)
+
+    parser.add_argument('-f', '--filepath',
+                        help='File path containing the ROS bags')
+    parser.add_argument('-s', '--savepath',
+                        help='Path where the images will be saved')
+    parser.add_argument('-r', '--resize',
+                        help='Resolution to resize images to')
+
+    args = parser.parse_args()
+
+    # TODO: Path validation can be moved to ImageHandler class
+    if args.filepath is not None:
+        filepath = args.filepath
+    else:
+        print 'No bag source provided'
+        filepath = sys.path[0]
+
+    if args.savepath is not None:
+        savepath = args.savepath
+        print 'Setting save path to:', savepath
+    else:
+        savepath = None
+
+    if args.resize:
+        # We're not ready for the future yet
+        pass
+
+    print '\033[1mFilepath:', filepath + '\033[0m'  # Print working directory
+
+    matches = []
+    bag_count = 0
+
+    if filepath[-4:] == '.bag':
+        bag = rosbag.Bag(filepath)
+        ImageHandler(filepath, savepath, bag)
+    else:
+
+        # Generate list of file paths containing bag files
+        for root, dirnames, filenames in os.walk(filepath):
+            for filename in fnmatch.filter(filenames, '*.bag'):
+                if not filename.startswith('.'):
+                    matches.append(os.path.join(root, filename))
+
+        print '\033[1m' + str(len(matches)) + ' bags found\033[0m\n'
+
+        # Iterate through found bags
+        for bag_dir in matches:
+            bag = rosbag.Bag(bag_dir)
+            try:
+                ImageHandler(bag_dir, savepath, bag)
+                bag_count = bag_count + 1
+            except rospy.ROSInterruptException:
+                pass
+
+        print 'Processed', bag_count, 'bags.\n'
+
+    print "Done!"
diff --git a/perception/mil_vision/ros_tools/on_the_fly_thresholder.py b/perception/mil_vision/ros_tools/on_the_fly_thresholder.py
new file mode 100755
index 00000000..5a4b0119
--- /dev/null
+++ b/perception/mil_vision/ros_tools/on_the_fly_thresholder.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+from txros import util, NodeHandle
+from twisted.internet import defer, reactor
+from sensor_msgs.msg import Image, CompressedImage
+from cv_bridge import CvBridge, CvBridgeError
+
+from mil_ros_tools import image_helpers
+import numpy as np
+import argcomplete
+import sys
+import argparse
+import cv2
+
+
+class Trackbars(object):
+    '''
+    Keeps track of the different colorspace trackbars
+    '''
+    def __init__(self, thresh_type):
+        self.thresh_type = list(thresh_type)
+        # OpenCV's 8-bit hue channel only spans 0-179; the other channels span 0-255
+        [cv2.createTrackbar(name + '_high', 'parameters', 0, 255 if name != 'h' else 179, lambda a: a) for name in self.thresh_type]
+        [cv2.createTrackbar(name + '_low', 'parameters', 0, 255 if name != 'h' else 179, lambda a: a) for name in self.thresh_type]
+
+        cv2.waitKey(10)
+
+    def get_bounds(self):
+        upper_bounds = np.array([cv2.getTrackbarPos(name + '_high', 'parameters') for name in self.thresh_type])
+        lower_bounds = np.array([cv2.getTrackbarPos(name + '_low', 'parameters') for name in self.thresh_type])
+        return lower_bounds, upper_bounds
+
+
+class Thresholder(object):
+    '''
+    Does the thresholding and manages the windows associated with that
+    '''
+    def __init__(self, img, thresh_type='bgr'):
+        cv2.namedWindow('parameters', cv2.WINDOW_NORMAL)
+        cv2.namedWindow('mask')
+        cv2.imshow('mask', np.zeros_like(img[:, :, 0]))
+
+        self.image = img
+
+        self.thresh_type = thresh_type
+        self.trackbars = Trackbars(thresh_type)
+
+        self.lower = None
+        self.upper = None
+
+    def update_mask(self):
+        self.lower, self.upper = self.trackbars.get_bounds()
+        self.mask = cv2.inRange(self.image if self.thresh_type == 'bgr'
+                                else cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV),
+                                self.lower, self.upper)
+        cv2.imshow("mask", self.mask)
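+
+    # Note: cv2.inRange yields a single-channel mask that is 255 where every
+    # channel of a pixel lies within [lower, upper] and 0 elsewhere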
+
+    def to_dict(self):
+        lower = map(float, self.lower)
+        upper = map(float, self.upper)
+        return {'color_space': self.thresh_type, 'ranges': {'lower': lower, 'upper': upper}, 'invert': False}
+
+
+@util.cancellableInlineCallbacks
+def main(param_prefix, args):
+    nh = yield NodeHandle.from_argv("on_the_fly_mission_runner", anonymous=True)
+
+    image_sub = yield nh.subscribe(args.topic_name, Image)
+    img = yield util.wrap_timeout(image_sub.get_next_message(), 5).addErrback(errback)
+
+    np_img = image_helpers.get_image_msg(img, "bgr8")
+    cv2.imshow(args.topic_name, np_img)
+    t = Thresholder(np_img, 'hsv' if args.hsv else 'bgr')
+
+    k = 0
+    while k != ord('q'):  # q to quit without saving
+        t.update_mask()
+        k = cv2.waitKey(50) & 0xFF
+
+        if k == ord('s'):  # s to save parameters
+            print "Saving params:"
+            temp = t.to_dict()
+            print temp
+            nh.set_param(param_prefix, temp)
+            break
+
+    cv2.destroyAllWindows()
+    reactor.stop()
+
+
+def errback(e):
+    '''Just for catching errors'''
+    print "Error when looking for image."
+    print " - You probably entered the wrong image topic."
+    reactor.stop()
+
+
+def do_parsing():
+    usage_msg = "Useful for doing on-the-fly color thresholding."
+    desc_msg = "Pass the name of the topic to listen to and the parameter family to set."
+
+    parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
+    parser.add_argument(dest='topic_name',
+                        help="The topic name you'd like to listen to.")
+    parser.add_argument(dest='parameter_family',
+                        help="This script will set the rosparams: \n\t `parameter_family`/bgr_high \n\t `parameter_family`/bgr_low \n OR \
+                        \n\t `parameter_family`/hsv_high \n\t `parameter_family`/hsv_low \n (depending on --hsv)")
+    parser.add_argument('--hsv', action='store_true',
+                        help="Use HSV instead of BGR")
+
+    args = parser.parse_args(sys.argv[1:])
+    print 'Using {}'.format('HSV' if args.hsv else 'BGR')
+
+    return args.parameter_family, args
+
+
+if __name__ == '__main__':
+    param_prefix, args = do_parsing()
+    reactor.callWhenRunning(main, param_prefix, args)
+    reactor.run()
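+
+# Example usage (topic and parameter family are illustrative):
+#   rosrun mil_vision on_the_fly_thresholder.py /camera/front/image_rect_color buoy_red --hsv
+# Tune the sliders in the 'parameters' window until the 'mask' window isolates
+# the target, then press 's' to write the ranges to the rosparam server under
+# 'buoy_red', or 'q' to quit without saving.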
diff --git a/perception/mil_vision/setup.py b/perception/mil_vision/setup.py
new file mode 100644
index 00000000..29a9ec75
--- /dev/null
+++ b/perception/mil_vision/setup.py
@@ -0,0 +1,11 @@
+# ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
+
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
+
+# fetch values from package.xml
+setup_args = generate_distutils_setup(
+    packages=[''],
+)
+
+setup(**setup_args)
diff --git a/perception/mil_vision/src/camera_lidar_transformer.cpp b/perception/mil_vision/src/camera_lidar_transformer.cpp
new file mode 100644
index 00000000..019d56b2
--- /dev/null
+++ b/perception/mil_vision/src/camera_lidar_transformer.cpp
@@ -0,0 +1,212 @@
+#include "camera_lidar_transformer.hpp"
+
+// 0.06 s (60 ms) tolerance when matching cloud stamps to the request stamp
+ros::Duration CameraLidarTransformer::MAX_TIME_ERR = ros::Duration(0, 6E7);
+CameraLidarTransformer::CameraLidarTransformer()
+    : nh(ros::this_node::getName()),
+      tfBuffer(),
+      tfListener(tfBuffer, nh),
+      lidarSub(nh, "/velodyne_points", 10),
+      lidarCache(lidarSub, 10),
+      camera_info_received(false)
+#ifdef DO_ROS_DEBUG
+      ,
+      image_transport(nh)
+#endif
+{
+#ifdef DO_ROS_DEBUG
+  points_debug_publisher = image_transport.advertise("points_debug", 1);
+  pubMarkers = nh.advertise<visualization_msgs::MarkerArray>("markers_debug", 10);
+#endif
+  nh.param<std::string>("camera_info_topic", camera_info_topic, "/right/right/camera_info");
+  cameraInfoSub = nh.subscribe(camera_info_topic, 1, &CameraLidarTransformer::cameraInfoCallback, this);
+  std::string camera_to_lidar_transform_topic;
+  nh.param<std::string>("camera_to_lidar_transform_topic", camera_to_lidar_transform_topic, "transform_camera");
+  transformServiceServer = nh.advertiseService(camera_to_lidar_transform_topic, &CameraLidarTransformer::transformServiceCallback, this);
+}
+
+void CameraLidarTransformer::cameraInfoCallback(sensor_msgs::CameraInfo info)
+{
+  camera_info = info;
+  cam_model.fromCameraInfo(camera_info);
+  camera_info_received = true;
+}
+
+bool CameraLidarTransformer::inCameraFrame(cv::Point2d& point)
+{
+  return point.x > 0 && point.x < camera_info.width &&
+         point.y > 0 && point.y < camera_info.height;
+}
+
+void CameraLidarTransformer::drawPoint(cv::Mat& mat, cv::Point2d& point, cv::Scalar color)
+{
+  if (point.x - 20 > 0 && point.x + 20 < mat.cols && point.y - 20 > 0 && point.y + 20 < mat.rows)
+    cv::circle(mat, point, 3, color, -1);
+}
+
+bool CameraLidarTransformer::transformServiceCallback(mil_msgs::CameraToLidarTransform::Request &req, mil_msgs::CameraToLidarTransform::Response &res)
+{
+  if (!camera_info_received)
+  {
+    res.success = false;
+    res.error = "NO CAMERA INFO";
+    return true;
+  }
+
+  if (camera_info.header.frame_id != req.header.frame_id)
+  {
+    res.success = false;
+    res.error = "DIFFERENT FRAME ID THAN SUBSCRIBED CAMERA";
+    return true;
+  }
+
+  visualization_msgs::MarkerArray markers;
+  std::vector<sensor_msgs::PointCloud2ConstPtr> nearbyLidar = lidarCache.getInterval(req.header.stamp - MAX_TIME_ERR, req.header.stamp + MAX_TIME_ERR);
+  sensor_msgs::PointCloud2ConstPtr scloud;
+  ros::Duration minErr = ros::Duration(5000, 0);
+  for (auto& pc : nearbyLidar)
+  {
+    // Compare absolute time error, so clouds slightly before the request
+    // stamp are treated the same as clouds slightly after it
+    ros::Duration thisErr = pc->header.stamp - req.header.stamp;
+    if (thisErr < ros::Duration(0, 0))
+      thisErr = -thisErr;
+    if (thisErr < minErr)
+    {
+      scloud = pc;
+      minErr = thisErr;
+    }
+  }
+  if (!scloud)
+  {
+    res.success = false;
+    res.error = mil_msgs::CameraToLidarTransform::Response::CLOUD_NOT_FOUND;
+    return true;
+  }
+  if (!tfBuffer.canTransform(req.header.frame_id, "velodyne", ros::Time(0), ros::Duration(1)))
+  {
+    res.success = false;
+    res.error = "NO TRANSFORM";
+    return true;
+  }
+  geometry_msgs::TransformStamped transform = tfBuffer.lookupTransform(req.header.frame_id, "velodyne", ros::Time(0));
+  sensor_msgs::PointCloud2 cloud_transformed;
+  tf2::doTransform(*scloud, cloud_transformed, transform);
+
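+  // Everything below projects each lidar point through the camera intrinsics;
+  // points landing within req.tolerance pixels of the requested pixel are
+  // collected, and a plane normal is estimated from that set.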
+#ifdef DO_ROS_DEBUG
+  cv::Mat debug_image(camera_info.height, camera_info.width, CV_8UC3, cv::Scalar(0));
+  cv::circle(debug_image, cv::Point(req.point.x, req.point.y), 8, cv::Scalar(255, 0, 0), -1);
+#endif
+
+  double minDistance = std::numeric_limits<double>::max();
+  pcl::PointCloud<pcl::PointXYZ> cloud;
+  pcl::fromROSMsg(cloud_transformed, cloud);
+  std::vector<int> indices;
+  for (unsigned index = 0; index < cloud.size(); index++)
+  {
+    pcl::PointXYZ& p = cloud[index];
+    if (p.z < 0 || p.z > 30) continue;
+    cv::Point2d point = cam_model.project3dToPixel(cv::Point3d(p.x, p.y, p.z));
+    if (inCameraFrame(point))
+    {
+#ifdef DO_ROS_DEBUG
+      visualization_msgs::Marker marker_point;
+      marker_point.header = req.header;
+      marker_point.header.seq = 0;
+      marker_point.header.frame_id = req.header.frame_id;
+      marker_point.id = index;
+      marker_point.type = visualization_msgs::Marker::CUBE;
+      marker_point.pose.position.x = p.x;
+      marker_point.pose.position.y = p.y;
+      marker_point.pose.position.z = p.z;
+      marker_point.scale.x = 0.1;
+      marker_point.scale.y = 0.1;
+      marker_point.scale.z = 0.1;
+      marker_point.color.a = 1.0;
+      marker_point.color.r = 0.0;
+      marker_point.color.g = 1.0;
+      marker_point.color.b = 0.0;
+      drawPoint(debug_image, point);
+#endif
+      // Distance (2D) from request point to projected lidar point
+      double distance = sqrt(pow(point.x - req.point.x, 2) + pow(point.y - req.point.y, 2));
+      if (distance < req.tolerance)
+      {
+        geometry_msgs::Point geo_point;
+        geo_point.x = p.x;
+        geo_point.y = p.y;
+        geo_point.z = p.z;
+        if (distance < minDistance)
+        {
+          res.closest = geo_point;
+          minDistance = distance;
+        }
+        indices.push_back(index);
+
+        res.transformed.push_back(geo_point);
+
+#ifdef DO_ROS_DEBUG
+        drawPoint(debug_image, point, cv::Scalar(0, 255, 0));
+        marker_point.color.r = 1.0;
+        marker_point.color.g = 0.0;
+        marker_point.color.b = 0.0;
+#endif
+      }
+#ifdef DO_ROS_DEBUG
+      markers.markers.push_back(marker_point);
+#endif
+    }
+  }
+  if (res.transformed.size() > 0)
+  {
+    // Estimate the plane normal from the accepted points
+    float x, y, z, n;
+    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
+    ne.computePointNormal(cloud, indices, x, y, z, n);
+    Eigen::Vector3d normal_vector = Eigen::Vector3d(x, y, z).normalized();
+    res.normal.x = -normal_vector(0, 0);
+    res.normal.y = -normal_vector(1, 0);
+    res.normal.z = -normal_vector(2, 0);
+
+#ifdef DO_ROS_DEBUG
+    // Add a marker for the normal to the plane
+    geometry_msgs::Point sdp_normalvec_ros;
+    sdp_normalvec_ros.x = res.closest.x + res.normal.x;
+    sdp_normalvec_ros.y = res.closest.y + res.normal.y;
+    sdp_normalvec_ros.z = res.closest.z + res.normal.z;
+    visualization_msgs::Marker marker_normal;
+    marker_normal.header = req.header;
+    marker_normal.header.seq = 0;
+    marker_normal.header.frame_id = req.header.frame_id;
+    marker_normal.id = 3000;
+    marker_normal.type = visualization_msgs::Marker::ARROW;
+    marker_normal.points.push_back(res.closest);
+    marker_normal.points.push_back(sdp_normalvec_ros);
+    marker_normal.scale.x = 0.1;
+    marker_normal.scale.y = 0.5;
+    marker_normal.scale.z = 0.5;
+    marker_normal.color.a = 1.0;
+    marker_normal.color.r = 0.0;
+    marker_normal.color.g = 0.0;
+    marker_normal.color.b = 1.0;
+    markers.markers.push_back(marker_normal);
+#endif
+
+    res.distance = res.closest.z;
+    res.success = true;
+  }
+  else
+  {
+    res.error = mil_msgs::CameraToLidarTransform::Response::NO_POINTS_FOUND;
+    res.success = false;
+  }
+
+#ifdef DO_ROS_DEBUG
+  // Publish 3D debug markers
+  pubMarkers.publish(markers);
+  // Publish debug image
+  cv_bridge::CvImage ros_debug_image;
+  ros_debug_image.encoding = "bgr8";
+  ros_debug_image.image = debug_image.clone();
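+  // In the debug image, the blue circle marks the requested pixel and the
+  // green dots mark projected lidar points that fell within req.tolerance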
points_debug_publisher.publish(ros_debug_image.toImageMsg()); +#endif + + return true; +} + +int main (int argc, char** argv) +{ + ros::init (argc, argv, "camera_lidar_transformer"); + CameraLidarTransformer transformer; + ros::spin (); +} diff --git a/perception/mil_vision/src/camera_stream_demo.cpp b/perception/mil_vision/src/camera_stream_demo.cpp new file mode 100755 index 00000000..adbab43a --- /dev/null +++ b/perception/mil_vision/src/camera_stream_demo.cpp @@ -0,0 +1,39 @@ +#include +#include +#include +#include // dont forget this include for camera stream functionality + +using namespace std; + +int main(int argc, char **argv) +{ + // Node setup + string cam_topic {"stereo/left/image_rect_color"}; + size_t history_length{200}; + ros::init(argc, argv, "camera_stream_demo"); + ros::NodeHandle nh; + + // Template argument should be cv::Vec3b for color images or uint8_t + // For grayscale images + mil_vision::ROSCameraStream left_cam_stream(nh, history_length); // Constructs empty inactive + // camera stream object + if(!left_cam_stream.init(cam_topic)) // Initializes object, if sucessful, object will automatically + return -1; // store a history of images published to cam_topic in its buffer + + // Display most recent and oldest frame in the buffer + while(cv::waitKey(100) == -1 && left_cam_stream.ok()) + { + size_t size = left_cam_stream.size(); + if(size == 200) + { + cv::imshow("newest_frame", left_cam_stream[0]->image()); + cv::imshow("oldest_frame", left_cam_stream[-1]->image()); + + // It is also possible to request a frame taken at a specific time + /* + Mat frame_at_time_t = left_cam_stream.getFrameFromTime(desired_time)->image(); + */ + } + } + +} diff --git a/perception/mil_vision/src/mil_vision_lib/active_contours.cpp b/perception/mil_vision/src/mil_vision_lib/active_contours.cpp new file mode 100644 index 00000000..a5fb1006 --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/active_contours.cpp @@ -0,0 +1,253 @@ +#include + +using namespace std; +using namespace cv; + +namespace mil_vision +{ + +namespace Perturbations +{ + +void initPerturbationCache() +{ + std::unordered_map seq_to_mod_border_idxs; + seq_to_mod_border_idxs[0] = 0; + seq_to_mod_border_idxs[1] = 1; + seq_to_mod_border_idxs[2] = 2; + seq_to_mod_border_idxs[3] = 3; + seq_to_mod_border_idxs[4] = 4; + seq_to_mod_border_idxs[5] = 12; + seq_to_mod_border_idxs[6] = 20; + seq_to_mod_border_idxs[7] = 28; + seq_to_mod_border_idxs[8] = 36; + seq_to_mod_border_idxs[9] = 35; + seq_to_mod_border_idxs[10] = 34; + seq_to_mod_border_idxs[11] = 33; + seq_to_mod_border_idxs[12] = 32; + seq_to_mod_border_idxs[13] = 24; + seq_to_mod_border_idxs[14] = 16; + seq_to_mod_border_idxs[15] = 8; + for(uint8_t entry = 0; entry < 25; entry++) + { + for(uint8_t exit = 0; exit < 25; exit++) + { + if(entry > exit || exit - entry == 1 || (exit + 16) - entry == 1) + continue; + vector perturbation_list; + vector occupied_squares; + auto in = seq_to_mod_border_idxs[entry]; + auto out = seq_to_mod_border_idxs[exit]; + growRoute(perturbation_list, occupied_squares, in, out); // Appends all possible paths from entry point to + } // exit point at the corresponding cache position + } +} + + +vector> getPerturbations(uint8_t entry, uint8_t exit) +{ + auto key = entry <= exit? 
make_pair(entry, exit) : make_pair(exit, entry); + auto perturbations = perturbation_cache[key]; + if(exit < entry) + for(auto& route : perturbations) + reverse(route.begin(), route.end()); + return perturbations; +} + +uint8_t getIdxFromPoint(Point2i hood_point) +{ + uint8_t idx = hood_point.y * 8; + idx += hood_point.x; + return idx; +} + +Point2i getPointFromIdx(uint8_t idx) +{ + auto x = idx % 8; + auto y = idx / 8; + return std::move(Point2i(x, y)); +} + +vector getPointList(vector& idx_list) +{ + vector point_list; + for(auto& idx : idx_list) + point_list.push_back(getPointFromIdx(idx)); + + for(auto& pt : point_list) + { + pt.x -= 2; + pt.y -= 2; + } + + return point_list; +} + +vector getHoodIdxs(uint8_t idx, bool include_border) +{ + vector hood; + hood.push_back(idx - 8 - 1); + hood.push_back(idx - 8); + hood.push_back(idx - 8 + 1); + hood.push_back(idx - 1); + hood.push_back(idx + 1); + hood.push_back(idx + 8 - 1); + hood.push_back(idx + 8); + hood.push_back(idx + 8 + 1); + + if(include_border) + hood = vector(hood.begin(), remove_if(hood.begin(), hood.end(), + [](uint8_t x){return x < 0 || x >= 39 || x%8 > 4; })); + else + hood = vector(hood.begin(), remove_if(hood.begin(), hood.end(), + [](uint8_t x){return x < 0 || x >= 39 || x/8 == 0 || x/8 == 4 || x%8 == 0 || x%8 > 3; })); + + return hood; +} + +bool isNeighbor(uint8_t idx1, uint8_t idx2) +{ + auto hood = getHoodIdxs(idx1, true); + return find(hood.begin(), hood.end(), idx2) != hood.end(); +} + +bool isNeighborPoint(const Point2i &pt1, const Point2i &pt2) +{ + return abs(pt1.x - pt2.x) == 1 && abs(pt1.y - pt2.y) == 1; +} + +void growRoute(const vector& partial, const vector& occupied, uint8_t entry, uint8_t exit) +{ + uint8_t tail = (partial.size() == 0)? entry : partial.back(); + auto candidates = getHoodIdxs(tail, false); + + // remove candidates that were neighbors of prev tail + candidates = vector(candidates.begin(), remove_if(candidates.begin(), candidates.end(), + [&occupied](uint8_t x){return find(occupied.begin(), occupied.end(), x) != occupied.end(); })); + + auto next_occupied = occupied; // neighbors of current tailed should be blacklisted for next route growth + for(auto cand : candidates) + next_occupied.push_back(cand); + + for(auto new_elem : candidates) + { + auto next_partial = partial; + next_partial.push_back(new_elem); + + auto next_tails = getHoodIdxs(new_elem, true); // true --> include border + auto find_exit_itr = find(next_tails.begin(), next_tails.end(), exit); + if(find_exit_itr != next_tails.end() && *find_exit_itr != tail) // add to cache if exit is a possible next tail + { + pair key; + + if(entry <= exit) + key = make_pair(entry, exit); + else + { + key = make_pair(exit, entry); + reverse(next_partial.begin(), next_partial.end()); + } + perturbation_cache[key].push_back(next_partial); + } + else + growRoute(next_partial, next_occupied, entry, exit); + } + + return; // No viable growth candidates +} + +vector perturb(const vector& src_curve, vector perturbation,int idx) +{ + auto pt = src_curve[idx]; + auto pt_list = getPointList(perturbation); + for(auto& pt_elem : pt_list) + { + pt_elem.x += pt.x; + pt_elem.y += pt.y; + } + + vector dest(src_curve.begin(), src_curve.begin() + idx - 1); + for(auto& pert : pt_list) + dest.push_back(pert); + auto it = src_curve.begin() + idx + 2; + while(it != src_curve.end()) + { + dest.push_back(*it); + it++; + } + + return dest; +} + +} // namespace Perturbations + +ClosedCurve::ClosedCurve(vector points) +{ + _curve_points = points; +} + +void 
ClosedCurve::applyPerturbation(const vector& perturbation, int idx) +{ + _curve_points = Perturbations::perturb(_curve_points, perturbation, idx); +} + +ClosedCurve ClosedCurve::perturb(const std::vector& perturbation, int idx) +{ + return ClosedCurve(Perturbations::perturb(_curve_points, perturbation, idx)); +} + +bool ClosedCurve::validateCurve(std::vector& curve) +{ + // Make sure that all consecutive points are 8-connected + auto eight_connected = [](Point2i a, Point2i b){ return abs(a.x - b.x) <= 1 && abs(a.y - b.y) <= 1; }; + if(!eight_connected(curve[0], curve.back())) + return false; + cout << "Checking for unconnected consecutive pts" << endl; + for(size_t i = 1; i < curve.size(); i++) + if(!eight_connected(curve[i - 1], curve[i])) + { + cout << "failure pts: " << curve[i - 1] << "\t" << curve[i] << endl; + return false; + } + + // Make sure that points that are not adjacent are never 8-connected + vector forbidden_neighbors; + forbidden_neighbors.push_back(curve.back()); + cout << "Checking for non-adjacent neighbors" << endl; + for(size_t i = 1; i < curve.size(); i++) + { + auto& pt = curve[i]; + auto count = count_if(forbidden_neighbors.begin(), forbidden_neighbors.end(), [pt](const Point2i &test_pt) + { return Perturbations::isNeighborPoint(pt, test_pt); }); + forbidden_neighbors.push_back(curve[i - 1]); + + if(i > curve.size() - 2) // Should be neighbor with first one added only + { + if(count > 1) + { + auto conflict_pt = find_if(forbidden_neighbors.begin(), forbidden_neighbors.end(), + [pt](Point2i test_pt){ return mil_vision::Perturbations::isNeighborPoint(pt, test_pt); }); + cout << "failure pts: " << curve[1] << "\t" << (conflict_pt != forbidden_neighbors.end()? Point2i(0,0) : *conflict_pt) << endl; + return false; + } + } + else + { + if(count > 0) + { + auto conflict_pt = find_if(forbidden_neighbors.begin(), forbidden_neighbors.end(), + [pt](Point2i test_pt){ return mil_vision::Perturbations::isNeighborPoint(pt, test_pt); }); + cout << "failure pts: " << curve[1] << "\t" << (conflict_pt != forbidden_neighbors.end()? Point2i(0,0) : *conflict_pt) << endl; + return false; + } + } + } + return true; // No failures! 
+} + +vector calcCosts(const Mat& img, vector candidate_perturbs, function cb) +{ + +} + +} // namespace mil_vision diff --git a/perception/mil_vision/src/mil_vision_lib/colorizer/camera_observer.cpp b/perception/mil_vision/src/mil_vision_lib/colorizer/camera_observer.cpp new file mode 100644 index 00000000..99b83fdd --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/colorizer/camera_observer.cpp @@ -0,0 +1,73 @@ +#include + +namespace mil_vision{ + +using mil_tools::operator "" _s; // converts to std::string + +CameraObserver::CameraObserver(ros::NodeHandle &nh, std::string &pcd_in_topic, std::string &cam_topic, size_t hist_size) +: _nh{nh}, _cam_stream{nh, hist_size} +{ + try + { + if(!_cam_stream.init(cam_topic)) + { + _err_msg = "COLORIZER: ROSCameraStreams could not be initialized with "_s + cam_topic + "."_s; + return; + } + } + catch(std::exception &e) + { + std::cout << __PRETTY_FUNCTION__ << " exception caught: " << e.what() << std::endl; + } + + + // Check that tf for this camera is up (default template arg is pcl::PointXYZ) + auto velodyne_msg = ros::topic::waitForMessage>(pcd_in_topic, _nh, ros::Duration{3, 0}); + std::string src_frame_id = velodyne_msg->header.frame_id; + std::string target_frame_id; + ros::Duration tf_timeout{5, 0}; // Wait 5 seconds max for each TF + std::string err = "COLORIZER: waiting for tf between "_s + src_frame_id + " and "_s + target_frame_id + ": "_s; + if(_tf_listener.waitForTransform(_cam_stream.getCameraModelPtr()->tfFrame(), src_frame_id, ros::Time(0), + tf_timeout, ros::Duration(0.05), &err)) + { + _ok = true; + return; // Ideal return point + } + else + ROS_ERROR(err.c_str()); // TF not available +} + +ColorObservation::VecImg CameraObserver::get_color_observations(const PCD::ConstPtr &pcd) +{ + using std::vector; + using std::cout; + using std::endl; + cout << __PRETTY_FUNCTION__ << endl; + + // Structre: Image pixels are lists of ColorObservations for the respective pixels in the image that have + // pcd points that would be imaged there + auto obs_img = vector>{size_t(_cam_stream.rows()), vector{size_t(_cam_stream.cols())}}; + PCD pcd_cam{}; // _cam indicates the reference frame + auto cam_model = _cam_stream.getCameraModelPtr(); + + // We will first transform the pcd from lidar frame into the frame of the camera so that we may project + // it into the image with just the camera intrinsics + pcl_ros::transformPointCloud(cam_model->tfFrame(), *pcd, pcd_cam, _tf_listener); + + cv::Matx33d K_cv = cam_model->fullIntrinsicMatrix(); + Eigen::Matrix3d K_eigen; + cv::cv2eigen(K_cv, K_eigen); + + for(auto pt : pcd_cam) + { + cout << "Pt: " << pt << endl; + auto imaged_pt = K_eigen * Eigen::Matrix{pt.x, pt.y, pt.z}; + cout << "Imaged pt: " << imaged_pt << endl; + pt = pcl::PointXYZ(imaged_pt[0], imaged_pt[1], imaged_pt[2]); + + } + return ColorObservation::VecImg(); + +} + +} // namespace mil_vision diff --git a/perception/mil_vision/src/mil_vision_lib/colorizer/color_observation.cpp b/perception/mil_vision/src/mil_vision_lib/colorizer/color_observation.cpp new file mode 100644 index 00000000..41f4867c --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/colorizer/color_observation.cpp @@ -0,0 +1,15 @@ +#include + + +namespace mil_vision{ + +UnoccludedPointsImg::UnoccludedPointsImg() +// : frame_ptr(frame_ptr), +// cloud_ptr(cloud_ptr) +{ + // auto cam_model = frame_ptr->getCameraModelPtr(); + + +} + +} // namespace mil_vision \ No newline at end of file diff --git 
a/perception/mil_vision/src/mil_vision_lib/colorizer/pcd_colorizer.cpp b/perception/mil_vision/src/mil_vision_lib/colorizer/pcd_colorizer.cpp new file mode 100644 index 00000000..210843e6 --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/colorizer/pcd_colorizer.cpp @@ -0,0 +1,173 @@ +#include + +using namespace std; +using namespace cv; + +namespace mil_vision{ + +PcdColorizer::PcdColorizer(ros::NodeHandle nh, string input_pcd_topic) + : _nh(nh), _img_hist_size{10}, _cloud_processor{nh, input_pcd_topic, _img_hist_size}, + _input_pcd_topic{input_pcd_topic}, _output_pcd_topic{input_pcd_topic + "_colored"} +{ + using mil_tools::operator "" _s; // converts to std::string + + try + { + // Subscribe to point cloud topic + _cloud_sub = _nh.subscribe>(_input_pcd_topic, 1, &SingleCloudProcessor::operator(), &_cloud_processor); + + // Advertise output topic + _cloud_pub = _nh.advertise>(_output_pcd_topic, 1, true); // output type will probably be different + } + catch(std::exception &e) + { + _err_msg = "COLORIZER: Suscriber or publisher error caught: "_s + e.what(); + ROS_ERROR(_err_msg.c_str()); + return; + } + + std::string msg = "COLORIZER: Initialization was "_s + (_cloud_processor.ok()? "successful" : "unsuccessful"); + msg += "\n Input point cloud topic: " + _input_pcd_topic + "\n Output point cloud topic: " + _output_pcd_topic; + ROS_INFO(msg.c_str()); + + if(_cloud_processor.ok()) + { + _ok = true; + _active = true; + } +} + +void PcdColorizer::_cloud_cb(const PCD<>::ConstPtr &cloud_in) +{ + change_input_mtx.lock(); + // _input_pcd = cloud_in; // TODO: figure out a good communication mechanism b/w colorizer, single processor and merger + ROS_INFO("Got a new cloud"); + change_input_mtx.unlock(); +} + +// PcdColorizer::PcdColorizer(ros::NodeHandle nh, string input_pcd_topic) +// : PcdSubPubAlgorithm{nh, input_pcd_topic, input_pcd_topic + "_colored"}, +// _img_hist_size{10}, +// cloud_processor{nh, _img_hist_size} +// { +// cout << "img hist size: " << _img_hist_size << endl; +// // Subscribe to all rectified color img topics +// auto rect_color_topics = mil_tools::getRectifiedImageTopics(true); +// if(rect_color_topics.size() == 0) +// { +// _err_msg += "COLORIZER: There are no rectified color camera topics currently publishing on this ROS master ("; +// _err_msg += ros::master::getURI(); +// _err_msg += ") Re-run after rectified color images are being published."; +// ROS_ERROR(_err_msg.c_str()); +// return; +// } + +// // Construct and initialize Camera Stream objects +// size_t good_init = 0; +// for(size_t i = 0; i < rect_color_topics.size(); i++) +// { +// // Construct on heap but store ptrs in stack +// _ros_cam_ptrs.emplace_back(new CamStream{this->_nh, this->_img_hist_size}); +// good_init += int(_ros_cam_ptrs[i]->init(rect_color_topics[i])); // Count successfull init +// _transformed_cloud_ptrs.emplace_back(nullptr); +// } + +// // Make sure at least one ROSCameraStream was successfully initialized +// if(good_init == 0) +// { +// _err_msg = "COLORIZER: No ROSCameraStreams could be initialized."; +// ROS_ERROR_NAMED("COLORIZER", _err_msg.c_str()); +// return; +// } + +// std::mutex mtx; + +// // LAMBDAS! 
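+// (The commented-out block below sketches the intended design: a lambda run on
+// the first incoming cloud verifies that a TF from the cloud's frame to every
+// camera's frame is available before the colorizer marks itself ok.)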
+// auto check_tf_lambda = [this, &rect_color_topics, &mtx](const PointCloud::ConstPtr &cloud_in) +// { +// std::lock_guard lock{mtx}; // will unlock mtx when it goes out of scope +// ROS_INFO("COLORIZER: checking that all required TF's are available"); +// std::string src_frame_id = cloud_in->header.frame_id; +// std::string target_frame_id; +// ros::Duration tf_timeout{5, 0}; // Wait 5 seconds max for each TF +// this->_ok = true; // assume TF's are ok until proven otherwise + +// // Check each TF +// for(size_t i = 0; i < rect_color_topics.size(); i++) +// { +// std::string err {"COLORIZER: waiting for tf between "}; +// err += src_frame_id + std::string(" and ") + target_frame_id + std::string(": "); +// if(! this->_tf_listener.waitForTransform(this->_ros_cam_ptrs[i]->getCameraModelPtr()->tfFrame(), +// src_frame_id, ros::Time(0), tf_timeout, ros::Duration(0.05), &err)) +// { +// ROS_ERROR(err.c_str()); // TF not available +// this->_ok = false; +// } +// } +// if(this->_ok) +// ROS_INFO("COLORIZER: Required TF's are publishing."); +// mtx.unlock(); +// }; + +// void PcdColorizer::_cloud_cb(const PCD<>::ConstPtr &cloud_in) +// { +// std::cout << "active: " << _active << std::endl; +// std::cout << "fuck!" << std::endl; +// if(_active) +// { +// if(!_ok) +// { +// ROS_INFO((std::string("COLORIZER: received cloud msg but error flag is set: ") + _err_msg).c_str()); +// return; +// } + +// // std::cout << "fuck!" << std::endl; + +// // // Transforms pcd to frame of each camera and generates list of color observations for points +// // // observed in any of the cameras +// // _process_pcd(cloud_in); + +// // // Combines current colored point cloud with stored persistent color pcd +// // _combine_pcd(); +// } +// else +// ROS_WARN_DELAYED_THROTTLE(15, "COLORIZER: receiving clouds but not active"); +// std::cout << "Exiting " << __PRETTY_FUNCTION__ << std::endl; +// } + +// inline void PcdColorizer::_process_pcd(const PCD<>::ConstPtr &cloud_in) +// { +// std::cout << "process cloud" << std::endl; +// // Transform pcds to target tf frames +// std::string target_frame_id; + +// // Synchronize work for each camera among worker threads +// std::promise start_work_prom; +// std::shared_future star_work_fut{start_work_prom.get_future()}; +// std::vector> worker_done_proms{_ros_cam_ptrs.size()}; +// std::vector> worker_done_futs{_ros_cam_ptrs.size()}; + +// #pragma omp parallel for +// for(size_t i = 0; i < _ros_cam_ptrs.size(); i++) +// { +// target_frame_id = _ros_cam_ptrs[i]->getCameraModelPtr()->tfFrame(); +// std::shared_ptr transfromed_pcd{new PointCloud()}; +// pcl_ros::transformPointCloud(target_frame_id, *cloud_in, *transfromed_pcd, _tf_listener); +// _transformed_cloud_ptrs[i] = transfromed_pcd; +// } + + // #pragma omp parallel for + // for(size_t i = 0; i < _transformed_cloud_ptrs.size(); i++) + // { + // // This should be merged with above loop + // std::cout << "hey" << std::endl; + // } + // std::cout << "pcd: " << cloud_in->header << std::endl; +// } + +// inline void PcdColorizer::_combine_pcd() +// { + +// } + +} // namespace mil_vision diff --git a/perception/mil_vision/src/mil_vision_lib/colorizer/single_cloud_processor.cpp b/perception/mil_vision/src/mil_vision_lib/colorizer/single_cloud_processor.cpp new file mode 100644 index 00000000..4fdc8bf0 --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/colorizer/single_cloud_processor.cpp @@ -0,0 +1,58 @@ +#include + +// using namespace std; + +namespace mil_vision { + +using mil_tools::operator "" _s; // converts to 
std::string + +SingleCloudProcessor::SingleCloudProcessor(ros::NodeHandle nh, std::string& in_pcd_topic, size_t hist_size) +: _nh{nh}, _hist_size{hist_size} +{ + ROS_INFO(("SingleCloudProcessor: Initializing with " + in_pcd_topic).c_str()); + auto rect_color_topics = mil_tools::getRectifiedImageTopics(true); + if(rect_color_topics.size() == 0) + { + _err_msg += "COLORIZER: There are no rectified color camera topics currently publishing on this ROS master ("; + _err_msg += ros::master::getURI(); + _err_msg += ") Re-run node after rectified color images are being published."; + ROS_ERROR(_err_msg.c_str()); + return; + } + + for(auto& topic : rect_color_topics) + { + ROS_INFO(("SingleCloudProcessor: Creating CameraObserver for camera publishing to "_s + topic.c_str()).c_str()); + auto cam_observer_ptr = new CameraObserver{_nh, in_pcd_topic, topic, _hist_size}; + ROS_INFO(("CameraObserver: initialization "_s + (cam_observer_ptr->ok()? "successful" : "unsuccessful")).c_str()); + if(cam_observer_ptr->ok()) + _camera_observers.push_back(std::unique_ptr{cam_observer_ptr}); + else + delete cam_observer_ptr; + } + + if(_camera_observers.size() == 0) + { + _err_msg = "SingleCloudProcessor: No ROSCameraStreams could be initialized."; + ROS_ERROR_NAMED("COLORIZER", _err_msg.c_str()); + } + else + _ok = true; + return; +} + +void SingleCloudProcessor::operator()(const PCD::ConstPtr &pcd) +{ + + std::cout << "Called the single cloud processor operator()" << std::endl; + std::cout << "pcd: " << pcd->header << std::endl; + + + // Get color observation list from each of the observers + + // Merge lists from all of the observers + + // Summarize Confidence in each point color observation +} + +} // namespace mil_vision diff --git a/perception/mil_vision/src/mil_vision_lib/cv_utils.cc b/perception/mil_vision/src/mil_vision_lib/cv_utils.cc new file mode 100644 index 00000000..07364acd --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/cv_utils.cc @@ -0,0 +1,656 @@ +#include + +namespace mil_vision { + +cv::Point contour_centroid(Contour &contour) { + cv::Moments m = cv::moments(contour, true); + cv::Point center(m.m10 / m.m00, m.m01 / m.m00); + return center; +} + +bool larger_contour(const Contour &c1, const Contour &c2) { + if (cv::contourArea(c1) > cv::contourArea(c2)) + return true; + else + return false; +} + +cv::MatND smooth_histogram(const cv::MatND &histogram, + size_t filter_kernel_size, float sigma) { + cv::MatND hist = histogram.clone(); + std::vector gauss_kernel = + generate_gaussian_kernel_1D(filter_kernel_size, sigma); + size_t histSize = hist.total(); + int offset = (filter_kernel_size - 1) / 2; + for (size_t i = offset; i < histSize - offset; + i++) // Convolve histogram values with gaussian kernel + { + int sum = 0; + int kernel_idx = 0; + for (int j = i - offset; j <= int(i + offset); j++) { + sum += (hist.at(j) * gauss_kernel[kernel_idx++]); + } + hist.at(i) = sum; + } + for (int i = 0; i < offset; ++i) // Pad filtered result with zeroes + { + hist.at(i) = 0; + hist.at(histSize - 1 - i) = 0; + } + return hist; +} + +std::vector generate_gaussian_kernel_1D(size_t kernel_size, + float sigma) { + std::vector kernel; + int middle_index = (kernel_size - 1) / 2; + int first_discrete_sample_x = -(middle_index); + for (int i = first_discrete_sample_x; i <= 0; i++) { + float power = -0.5 * (float(i) / sigma) * (float(i) / sigma); + kernel.push_back( + exp(power)); // From definition of Standard Normal Distribution + } + for (int i = 1; i <= middle_index; i++) { // Kernel is symmetric + 
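+    // Mirror the already-computed left half of the kernel onto the right half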
kernel.push_back(kernel[middle_index - i]); + } + // Normalize kernel (sum of values should equal 1.0) + float sum = 0; + for (size_t i = 0; i < kernel_size; i++) { + sum += kernel[i]; + } + for (size_t i = 0; i < kernel_size; i++) { + kernel[i] /= sum; + } + return kernel; +} + +std::vector find_local_maxima(const cv::MatND &histogram, + float thresh_multiplier) { + + std::stringstream ros_log; + ros_log << "\x1b[1;31m" + << "find_local_maxima" + << "\x1b[0m" << std::endl; + + std::vector local_maxima, threshed_local_maxima; + float global_maximum = -std::numeric_limits::infinity(); + + // Locate local maxima and find global maximum + for (size_t idx = 1; idx < histogram.total() - 1; idx++) { + float current_value = histogram.at(idx); + if ((histogram.at(idx - 1) < current_value) && + (histogram.at(idx + 1) <= current_value)) { + local_maxima.push_back(cv::Point(idx, current_value)); + if (global_maximum < current_value) + global_maximum = current_value; + } + } + ros_log << "Maxima [x, y]:"; + for (size_t i = 0; i < local_maxima.size(); i++) { + if (local_maxima[i].y > global_maximum * thresh_multiplier) + threshed_local_maxima.push_back(local_maxima[i]); + if (i % 4 == 0) + ros_log << std::endl + << "\t"; + ros_log << "[" << std::setw(5) << local_maxima[i].x << "," << std::setw(5) + << local_maxima[i].y << "] "; + } + ros_log << std::endl + << "thresh: > global_maximum(" << global_maximum + << ") * thresh_multiplier(" << thresh_multiplier + << ") = " << (global_maximum * thresh_multiplier); + ros_log << std::endl + << "Threshed Maxima (x): "; + if (threshed_local_maxima.size() != local_maxima.size()) { + BOOST_FOREACH (cv::Point pt, threshed_local_maxima) { + ros_log << " " << pt.x << " "; + } + } else + ros_log << "same as 'Maxima'"; + ros_log << std::endl; +#ifdef SEGMENTATION_DEBUG + ROS_INFO(ros_log.str().c_str()); +#endif + return threshed_local_maxima; +} + +std::vector find_local_minima(const cv::MatND &histogram, + float thresh_multiplier) { + + std::stringstream ros_log; + ros_log << "\x1b[1;31m" + << "find_local_minima" + << "\x1b[0m" << std::endl; + + std::vector local_minima, threshed_local_minima; + float global_minimum = std::numeric_limits::infinity(); + ; + + // Locate local minima and find global minimum + for (size_t idx = 1; idx < histogram.total() - 1; idx++) { + float current_value = histogram.at(idx); + if ((histogram.at(idx - 1) >= current_value) && + (histogram.at(idx + 1) > current_value)) { + local_minima.push_back(cv::Point(idx, current_value)); + if (global_minimum > current_value) + global_minimum = current_value; + } + } + ros_log << "Minima [x, y]:"; + for (size_t i = 0; i < local_minima.size(); i++) { + if (local_minima[i].y < global_minimum * thresh_multiplier) + threshed_local_minima.push_back(local_minima[i]); + if (i % 4 == 0) + ros_log << std::endl + << "\t"; + ros_log << "[" << std::setw(5) << local_minima[i].x << "," << std::setw(5) + << local_minima[i].y << "] "; + } + ros_log << std::endl + << "thresh: < global_minimum(" << global_minimum + << ") * thresh_multiplier(" << thresh_multiplier + << ") = " << (global_minimum * thresh_multiplier); + ros_log << std::endl + << "Threshed Minima (x): "; + if (threshed_local_minima.size() != local_minima.size()) { + BOOST_FOREACH (cv::Point pt, threshed_local_minima) { + ros_log << " " << pt.x << " "; + } + } else + ros_log << "same as 'Minima'"; + ros_log << std::endl; +#ifdef SEGMENTATION_DEBUG + ROS_INFO(ros_log.str().c_str()); +#endif + return threshed_local_minima; +} + +unsigned int 
select_hist_mode(std::vector &histogram_modes, + int target) { + + std::stringstream ros_log; + ros_log << "\x1b[1;31m" + << "select_hist_mode" + << "\x1b[0m" << std::endl; + + std::vector distances; + BOOST_FOREACH (cv::Point mode, histogram_modes) { + distances.push_back(mode.x - target); + } + int min_idx = 0; + for (size_t i = 0; i < distances.size(); i++) { + if (std::abs(distances[i]) <= std::abs(distances[min_idx])) + min_idx = i; + } + if (histogram_modes.size() == 0) { + ros_log << "No modes could be generated" << std::endl; + ROS_INFO(ros_log.str().c_str()); + return 0; + } else + return histogram_modes[min_idx].x; +} + +void statistical_image_segmentation(const cv::Mat &src, cv::Mat &dest, + cv::Mat &debug_img, const int hist_size, + const float **ranges, const int target, + std::string image_name, bool ret_dbg_img, + const float sigma, + const float low_thresh_gain, + const float high_thresh_gain) { + std::stringstream ros_log; + ros_log << "\x1b[1;31m" + << "statistical_image_segmentation" + << "\x1b[0m" << std::endl; + + // Calculate histogram + cv::MatND hist, hist_smooth, hist_derivative; + cv::calcHist(&src, 1, 0, cv::Mat(), hist, 1, &hist_size, ranges, true, false); + + // Smooth histogram + const int kernel_size = 11; + hist_smooth = mil_vision::smooth_histogram(hist, kernel_size, sigma); + + // Calculate histogram derivative (central finite difference) + hist_derivative = hist_smooth.clone(); + hist_derivative.at(0) = 0; + hist_derivative.at(hist_size - 1) = 0; + for (int i = 1; i < hist_size - 1; ++i) { + hist_derivative.at(i) = + (hist_smooth.at(i + 1) - hist_smooth.at(i - 1)) / 2.0; + } + hist_derivative = mil_vision::smooth_histogram(hist_derivative, kernel_size, sigma); + + // Find target mode + std::vector histogram_modes = + mil_vision::find_local_maxima(hist_smooth, 0.1); + int target_mode = mil_vision::select_hist_mode(histogram_modes, target); + ros_log << "Target: " << target << std::endl; + ros_log << "Mode Selected: " << target_mode << std::endl; + + // Calculate std dev of histogram slopes + cv::Scalar hist_deriv_mean, hist_deriv_stddev; + cv::meanStdDev(hist_derivative, hist_deriv_mean, hist_deriv_stddev); + + // Determine thresholds for cv::inRange() using the std dev of histogram + // slopes times a gain as a cutoff heuristic + int high_abs_derivative_thresh = + std::abs(hist_deriv_stddev[0] * high_thresh_gain); + int low_abs_derivative_thresh = + std::abs(hist_deriv_stddev[0] * low_thresh_gain); + std::vector derivative_maxima = + mil_vision::find_local_maxima(hist_derivative, 0.01); + std::vector derivative_minima = + mil_vision::find_local_minima(hist_derivative, 0.01); + int high_thresh_search_start = target_mode; + int low_thresh_search_start = target_mode; + ros_log << "high_thresh_search_start: " << target_mode << std::endl; + ros_log << "Looking for the local minimum of the derivative of the histogram " + "immediately to the right of the selected mode." << std::endl; + + for (size_t i = 0; i < derivative_minima.size(); i++) { + ros_log << "\tderivative_minima[" << i << "].x = " << derivative_minima[i].x + << std::endl; + if (derivative_minima[i].x > target_mode) { + high_thresh_search_start = derivative_minima[i].x; + ros_log << "Done: The upper threshold will be no less than " + << high_thresh_search_start << std::endl; + break; + } + } + ros_log << "low_thresh_search_start: " << target_mode << std::endl; + ros_log << "Looking for the local maximum of the derivative of the histogram " + "immediately to the left of the selected mode." 
<< std::endl; + for (int i = derivative_maxima.size() - 1; i >= 0; i--) { + ros_log << "\tderivative_maxima[" << i << "].x = " << derivative_maxima[i].x + << std::endl; + if (derivative_maxima[i].x < target_mode) { + low_thresh_search_start = derivative_maxima[i].x; + ros_log << "Done: The lower threshold will be no greater than " + << low_thresh_search_start << std::endl; + break; + } + } + int high_thresh = high_thresh_search_start; + int low_thresh = low_thresh_search_start; + ros_log << "high_deriv_thresh: " << hist_deriv_stddev[0] << " * " + << high_thresh_gain << " = " << high_abs_derivative_thresh + << std::endl; + ros_log << "abs(high_deriv_thresh) - abs(slope) = slope_error" << std::endl; + ros_log << "i: potential upper threshold" << std::endl; + ros_log << "Looking for first i such that slope_error >= 0" << std::endl; + for (int i = high_thresh_search_start; i < hist_size; i++) { + int abs_slope = std::abs(hist_derivative.at(i)); + ros_log << "\ti = " << i << " : " << high_abs_derivative_thresh << " - " + << abs_slope << " = " << high_abs_derivative_thresh - abs_slope + << std::endl; + if (abs_slope <= high_abs_derivative_thresh) { + high_thresh = i; + break; + } + } + ros_log << "high_thresh = " << high_thresh << std::endl; + ros_log << "low_deriv_thresh: " << hist_deriv_stddev[0] << " * " + << low_thresh_gain << " = " << low_abs_derivative_thresh << std::endl; + ros_log << "abs(low_deriv_thresh) - abs(slope) = slope_error" << std::endl; + ros_log << "i: potential lower threshold" << std::endl; + ros_log << "Looking for first i such that slope_error <= 0" << std::endl; + for (int i = low_thresh_search_start; i > 0; i--) { + int abs_slope = std::abs(hist_derivative.at(i)); + ros_log << "\ti = " << i << " : " << low_abs_derivative_thresh << " - " + << abs_slope << " = " << high_abs_derivative_thresh - abs_slope + << std::endl; + + if (abs_slope <= low_abs_derivative_thresh) { + low_thresh = i; + break; + } + } + + ros_log << "low_thresh = " << low_thresh << std::endl; + ros_log << "\x1b[1;37mTarget: " << target + << "\nClosest distribution mode: " << target_mode + << "\nThresholds selected: low=" << low_thresh + << " high=" << high_thresh << "\x1b[0m" << std::endl; + + // Threshold image + cv::inRange(src, low_thresh, high_thresh, dest); + +#ifdef SEGMENTATION_DEBUG + ROS_INFO(ros_log.str().c_str()); + cv::imshow("src" + image_name, src); + cv::waitKey(1); +#endif + + // Closing Morphology operation + int dilation_size = 2; + cv::Mat structuring_element = cv::getStructuringElement( + cv::MORPH_ELLIPSE, cv::Size(2 * dilation_size + 1, 2 * dilation_size + 1), + cv::Point(dilation_size, dilation_size)); + cv::dilate(dest, dest, structuring_element); + cv::erode(dest, dest, structuring_element); + + if (ret_dbg_img) { + + try { + // Prepare to draw graph of histogram and derivative + int hist_w = src.cols; + int hist_h = src.rows; + int bin_w = hist_w / hist_size; + cv::Mat histImage(hist_h, hist_w, CV_8UC1, cv::Scalar(0, 0, 0)); + cv::Mat histDerivImage(hist_h, hist_w, CV_8UC1, cv::Scalar(0, 0, 0)); + cv::normalize(hist_smooth, hist_smooth, 0, histImage.rows, + cv::NORM_MINMAX, -1, cv::Mat()); + cv::normalize(hist_derivative, hist_derivative, 0, histImage.rows, + cv::NORM_MINMAX, -1, cv::Mat()); + + // Draw Graphs + for (int i = 1; i < hist_size; i++) { + // Plot image histogram + cv::line( + histImage, + cv::Point(bin_w * (i - 1), + hist_h - cvRound(hist_smooth.at(i - 1))), + cv::Point(bin_w * (i), hist_h - cvRound(hist_smooth.at(i))), + cv::Scalar(255, 0, 0), 2, 8, 0); + // Plot 
image histogram derivative + cv::line(histDerivImage, + cv::Point(bin_w * (i - 1), + hist_h - cvRound(hist_derivative.at(i - 1))), + cv::Point(bin_w * (i), + hist_h - cvRound(hist_derivative.at(i))), + cv::Scalar(122, 0, 0), 1, 8, 0); + } + + // Shade in area being segmented under histogram curve + cv::line(histImage, + cv::Point(bin_w * low_thresh, + hist_h - cvRound(hist_smooth.at(low_thresh))), + cv::Point(bin_w * low_thresh, hist_h), cv::Scalar(255, 255, 0), + 2); + cv::line(histImage, + cv::Point(bin_w * high_thresh, + hist_h - cvRound(hist_smooth.at(high_thresh))), + cv::Point(bin_w * high_thresh, hist_h), cv::Scalar(255, 255, 0), + 2); + cv::floodFill( + histImage, + cv::Point(bin_w * cvRound(float(low_thresh + high_thresh) / 2.0), + hist_h - 1), + cv::Scalar(155)); + + // Combine graphs into one image and display results + cv::Mat segmentation_channel = cv::Mat::zeros(histImage.size(), CV_8UC1); + std::vector debug_img_channels; + debug_img_channels.push_back(histImage); + debug_img_channels.push_back(histDerivImage); + debug_img_channels.push_back(dest * 0.25); + cv::merge(debug_img_channels, debug_img); + cv::Point text_upper_left(debug_img.cols / 2.0, debug_img.rows / 10.0); + std::string text_ln1 = image_name; + std::stringstream text_ln2; + text_ln2 << "low = " << low_thresh; + std::stringstream text_ln3; + text_ln3 << "high = " << high_thresh; + int font = cv::FONT_HERSHEY_SIMPLEX; + double font_scale = 0.0015 * debug_img.rows; + cv::Point vert_offset = cv::Point(0, debug_img.rows / 15.0); + cv::Scalar text_color(255, 255, 0); + cv::putText(debug_img, text_ln1, text_upper_left, font, font_scale, + text_color); + cv::putText(debug_img, text_ln2.str(), text_upper_left + vert_offset, + font, font_scale, text_color); + cv::putText(debug_img, text_ln3.str(), + text_upper_left + vert_offset + vert_offset, font, font_scale, + text_color); + } catch (std::exception &e) { + ROS_INFO(e.what()); + } + } +} + +cv::Mat triangulate_Linear_LS(cv::Mat mat_P_l, cv::Mat mat_P_r, + cv::Mat undistorted_l, cv::Mat undistorted_r) { + cv::Mat A(4, 3, CV_64FC1), b(4, 1, CV_64FC1), X(3, 1, CV_64FC1), + X_homogeneous(4, 1, CV_64FC1), W(1, 1, CV_64FC1); + W.at(0, 0) = 1.0; + A.at(0, 0) = + (undistorted_l.at(0, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 0) - + mat_P_l.at(0, 0); + A.at(0, 1) = + (undistorted_l.at(0, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 1) - + mat_P_l.at(0, 1); + A.at(0, 2) = + (undistorted_l.at(0, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 2) - + mat_P_l.at(0, 2); + A.at(1, 0) = + (undistorted_l.at(1, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 0) - + mat_P_l.at(1, 0); + A.at(1, 1) = + (undistorted_l.at(1, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 1) - + mat_P_l.at(1, 1); + A.at(1, 2) = + (undistorted_l.at(1, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 2) - + mat_P_l.at(1, 2); + A.at(2, 0) = + (undistorted_r.at(0, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 0) - + mat_P_r.at(0, 0); + A.at(2, 1) = + (undistorted_r.at(0, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 1) - + mat_P_r.at(0, 1); + A.at(2, 2) = + (undistorted_r.at(0, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 2) - + mat_P_r.at(0, 2); + A.at(3, 0) = + (undistorted_r.at(1, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 0) - + mat_P_r.at(1, 0); + A.at(3, 1) = + (undistorted_r.at(1, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 1) - + mat_P_r.at(1, 1); + A.at(3, 2) = + (undistorted_r.at(1, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 2) - + mat_P_r.at(1, 2); + b.at(0, 0) = + 
-((undistorted_l.at(0, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 3) - + mat_P_l.at(0, 3)); + b.at(1, 0) = + -((undistorted_l.at(1, 0) / undistorted_l.at(2, 0)) * + mat_P_l.at(2, 3) - + mat_P_l.at(1, 3)); + b.at(2, 0) = + -((undistorted_r.at(0, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 3) - + mat_P_r.at(0, 3)); + b.at(3, 0) = + -((undistorted_r.at(1, 0) / undistorted_r.at(2, 0)) * + mat_P_r.at(2, 3) - + mat_P_r.at(1, 3)); + solve(A, b, X, cv::DECOMP_SVD); + vconcat(X, W, X_homogeneous); + return X_homogeneous; +} + +Eigen::Vector3d kanatani_triangulation(const cv::Point2d &pt1, + const cv::Point2d &pt2, + const Eigen::Matrix3d &essential, + const Eigen::Matrix3d &R) { + /* + K. Kanatani, Y. Sugaya, and H. Niitsuma. Triangulation from two views + revisited: Hartley-Sturm vs. optimal + correction. In British Machine Vision Conference, page 55, 2008. + */ + // std::cout << "ptL_noisy: " << pt1 << " ptR_noisy: " << pt2 << std::endl; + const unsigned int max_iterations = 7; + Eigen::Vector3d p1_old(pt1.x, pt1.y, 1.0); + Eigen::Vector3d p2_old(pt2.x, pt2.y, 1.0); + const Eigen::Vector3d p1_0(pt1.x, pt1.y, 1.0); + const Eigen::Vector3d p2_0(pt2.x, pt2.y, 1.0); + Eigen::Vector3d p1, p2; + Eigen::Vector2d n1, n2, delta_p1, delta_p2, delta_p1_old, delta_p2_old; + delta_p1_old << 0.0, 0.0; + delta_p2_old << 0.0, 0.0; + Eigen::Matrix S; + S << 1, 0, 0, 0, 1, 0; + Eigen::Matrix2d essential_bar = essential.topLeftCorner(2, 2); + double lambda; + for (unsigned int i = 0; i < max_iterations; i++) { + n1 = S * (essential * p2_old); + n2 = S * (essential.transpose() * p1_old); + lambda = ((p1_0.transpose() * essential * p2_0)(0) - + (delta_p1_old.transpose() * essential_bar * delta_p2_old)(0)) / + (n1.transpose() * n1 + n2.transpose() * n2)(0); + delta_p1 = lambda * n1; + delta_p2 = lambda * n2; + p1 = p1_0 - (S.transpose() * delta_p1); + p2 = p2_0 - (S.transpose() * delta_p2); + p1_old = p1; + p2_old = p2; + delta_p1_old = delta_p1; + delta_p2_old = delta_p2; + // std::cout << "ptL_est: [" << p1.transpose() << "] ptR_est: [" << + // p2.transpose() << "]" << std::endl; + } + Eigen::Vector3d z = p1.cross(R * p2); + Eigen::Vector3d X = + ((z.transpose() * (essential * p2))(0) / (z.transpose() * z)(0)) * p1; + return X; +} + +Eigen::Vector3d lindstrom_triangulation(const cv::Point2d &pt1, + const cv::Point2d &pt2, + const Eigen::Matrix3d &essential, + const Eigen::Matrix3d &R) { + /* + Optimal triangulation method for two cameras with parallel principal + axes + Based of off this paper by Peter Lindstrom: + https://e-reports-ext.llnl.gov/pdf/384387.pdf **Listing 2** + */ + // std::cout << "ptL_noisy: " << pt1 << " ptR_noisy: " << pt2 << std::endl; + const unsigned int max_iterations = 7; + Eigen::Vector3d p1_old(pt1.x, pt1.y, 1.0); + Eigen::Vector3d p2_old(pt2.x, pt2.y, 1.0); + const Eigen::Vector3d p1_0(pt1.x, pt1.y, 1.0); + const Eigen::Vector3d p2_0(pt2.x, pt2.y, 1.0); + Eigen::Vector3d p1, p2; + Eigen::Vector2d n1, n2, delta_p1, delta_p2; + Eigen::Matrix S; + S << 1, 0, 0, 0, 1, 0; + Eigen::Matrix2d essential_bar = essential.topLeftCorner(2, 2); + double a, b, c, d, lambda; + c = p1_0.transpose() * (essential * p2_0); + for (unsigned int i = 0; i < max_iterations; i++) { + n1 = S * (essential * p2_old); + n2 = S * (essential.transpose() * p1_old); + a = n1.transpose() * (essential_bar * n2); + b = (0.5 * ((n1.transpose() * n1) + (n2.transpose() * n2)))(0); + d = sqrt(b * b - a * c); + double signum_b = (b > 0) ? 1 : ((b < 0) ? 
-1 : 0); + lambda = c / (b + signum_b * d); + delta_p1 = lambda * n1; + delta_p2 = lambda * n2; + p1 = p1_0 - (S.transpose() * delta_p1); + p2 = p2_0 - (S.transpose() * delta_p2); + p1_old = p1; + p2_old = p2; + // std::cout << "ptL_est: [" << p1.transpose() << "] ptR_est: [" << + // p2.transpose() << "]" << std::endl; + } + Eigen::Vector3d z = p1.cross(R * p2); + Eigen::Vector3d X = + ((z.transpose() * (essential * p2))(0) / (z.transpose() * z)(0)) * p1; + return X; +} + +ImageWithCameraInfo::ImageWithCameraInfo( + sensor_msgs::ImageConstPtr _image_msg_ptr, + sensor_msgs::CameraInfoConstPtr _info_msg_ptr) + : image_msg_ptr(_image_msg_ptr), info_msg_ptr(_info_msg_ptr), + image_time(_image_msg_ptr->header.stamp) {} + +FrameHistory::FrameHistory(std::string img_topic, unsigned int hist_size) + : topic_name(img_topic), history_size(hist_size), _image_transport(nh), + frame_count(0) { + std::stringstream console_msg; + console_msg << "[FrameHistory] size set to " << history_size << std::endl + << "\tSubscribing to image topic: " << topic_name; + ROS_INFO(console_msg.str().c_str()); + _image_sub = _image_transport.subscribeCamera( + img_topic, 1, &FrameHistory::image_callback, this); + if (_image_sub.getNumPublishers() == 0) { + std::stringstream error_msg; + error_msg << "[FrameHistory] no publishers currently publishing to " + << topic_name; + ROS_WARN(error_msg.str().c_str()); + } +} + +FrameHistory::~FrameHistory() { + std::stringstream console_msg; + console_msg << "[FrameHistory] Unsubscribed from image topic: " << topic_name + << std::endl + << "[FrameHistory] Deleting FrameHistory object" << std::endl; + ROS_INFO(console_msg.str().c_str()); +} + +void FrameHistory::image_callback( + const sensor_msgs::ImageConstPtr &image_msg, + const sensor_msgs::CameraInfoConstPtr &info_msg) { + /** + Adds an ImageWithCameraInfo object to the frame history ring buffer + */ + ImageWithCameraInfo current_frame(image_msg, info_msg); + bool full = _frame_history_ring_buffer.size() >= history_size; + std::stringstream debug_msg; + debug_msg << "Adding frame to ring buffer " + << "[frame=" << frame_count << "," + << "full=" << (full ? 
"true" : "false") + << ",frames_available=" << _frame_history_ring_buffer.size() << "]" + << std::endl; + ROS_DEBUG(debug_msg.str().c_str()); + if (!full) { + _frame_history_ring_buffer.push_back(current_frame); + } else { + _frame_history_ring_buffer[frame_count % history_size] = current_frame; + } + frame_count++; +} + +std::vector +FrameHistory::get_frame_history(unsigned int frames_requested) { + /** + Returns a vector with the last ImageWithCameraInfo + objects + */ + std::vector frame_history; + std::vector sorted_frame_history = + _frame_history_ring_buffer; + if (_frame_history_ring_buffer.size() < frames_requested) { + ROS_WARN("get_frame_history(%d): %d frames were requested, but there are " + "%d frames available", + frames_requested, frames_requested, + _frame_history_ring_buffer.size()); + } else { + std::sort(sorted_frame_history.begin(), sorted_frame_history.end()); + for (size_t i = 0; i < frames_requested; i++) { + frame_history.push_back(sorted_frame_history[i]); + if (i == frames_requested - 1) + break; + } + } + return frame_history; +} + +} // namespace mil_vision diff --git a/perception/mil_vision/src/mil_vision_lib/image_filtering.cpp b/perception/mil_vision/src/mil_vision_lib/image_filtering.cpp new file mode 100644 index 00000000..a7bf1b1b --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/image_filtering.cpp @@ -0,0 +1,112 @@ +#include + +namespace mil_vision +{ + +cv::Mat rotateKernel(const cv::Mat &kernel, float theta, bool deg, bool no_expand) +{ + theta = deg? theta : theta * mil_tools::PI / 180.0f; + cv::Point2f c_org{kernel.cols * 0.5f, kernel.rows * 0.5f}; // center of original + + if(no_expand) // rotates without expanding the canvas + { + cv::Mat result; + cv::Mat rot_mat = cv::getRotationMatrix2D(c_org, theta , 1.0f); + cv::warpAffine(kernel, result, rot_mat, kernel.size()); + return result; + } + + // Determine affine transform to move from top left to center of output size + float hypot = std::hypot(c_org.x, c_org.y); + cv::Point2f c_dest{hypot, hypot}; + float center_dest_coeffs[6] = {1.0f, 0.0f, c_dest.x - c_org.x, 0.0f, 1.0f, c_dest.y - c_org.y}; + cv::Mat center_dest = cv::Mat{2, 3, CV_32F, center_dest_coeffs}; + + // Move into rotation position in larger canvas + cv::Mat dest = cv::Mat::zeros(cv::Size(hypot * 2, hypot * 2), CV_8U); + auto dest_top_left = dest(cv::Rect(0, 0, kernel.cols, kernel.rows)); + kernel.copyTo(dest_top_left); + cv::warpAffine(dest, dest, center_dest, dest.size()); // center dest is ready for rotating + + // Rotate kernel about center point + cv::Mat rot_mat = cv::getRotationMatrix2D(c_dest, theta , 1.0); + cv::warpAffine(dest, dest, rot_mat, dest.size()); + + return dest; +} + +cv::Mat makeRotInvariant(const cv::Mat &kernel, int rotations) +{ + // Get angles of rotation + std::vector rotation_angles; + float delta_theta = 360.0f / rotations; + float theta = 0.0f; + while(true) + { + theta += delta_theta; + if(theta < 360.0f) + rotation_angles.push_back(theta); + else + break; + } + + // Incrementally rotate and save versions of original kernel + std::vector kernel_rotations; + cv::Mat dest = rotateKernel(kernel, 0.0f); + kernel_rotations.push_back(dest); + cv::Point2f c_dest{dest.cols * 0.5f, dest.rows * 0.5f}; + for(auto theta : rotation_angles) + kernel_rotations.push_back(rotateKernel(dest, theta, true, true)); + + // Average all rotated versions + cv::Mat sum = cv::Mat::zeros(dest.size(), CV_32S); + for(auto& rot_kernel : kernel_rotations) + cv::add(sum, rot_kernel, sum, cv::Mat(), CV_32S); + cv::Mat result = 
sum / float(kernel_rotations.size()); + result.convertTo(result, kernel.type()); + return result; +} + +float getRadialSymmetryAngle(const cv::Mat &kernel, float ang_res, bool deg) +{ + auto original = rotateKernel(kernel, 0.0f); + cv::Mat elem_wise_mult{original.size(), CV_32S}; + cv::multiply(original, original, elem_wise_mult); + auto standard = cv::sum(elem_wise_mult)[0]; + float max = deg? 360.0f : 2 * mil_tools::PI; + float result = max; + float best_score = 0; + bool left_starting_region = false; + + for(float theta = 0.0f; theta < max; theta += (deg? ang_res * 180.0f / mil_tools::PI : ang_res)) + { + cv::multiply(original, rotateKernel(original, theta, deg, true), elem_wise_mult); + double score = standard / cv::sum(elem_wise_mult)[0]; + if(score < 0.9) + left_starting_region = true; + if(!left_starting_region) + continue; + if(score >= 0.975) + { + if(result == max) // First good candidate + { + result = theta; + best_score = score; + } + else if(score > best_score) // Improved candidate + { + result = theta; + best_score = score; + } + else // Decreased candidate above 0.975 + break; + } + else // Decreased candidate below 0.975 + if(result != max) // Viable candidate already found + break; + } + + return result; +} + +} // namespace mil_vision diff --git a/perception/mil_vision/src/mil_vision_lib/point_cloud_algorithms.cc b/perception/mil_vision/src/mil_vision_lib/point_cloud_algorithms.cc new file mode 100644 index 00000000..a0d23231 --- /dev/null +++ b/perception/mil_vision/src/mil_vision_lib/point_cloud_algorithms.cc @@ -0,0 +1,142 @@ +#include + +using namespace std; +using namespace cv; + +namespace mil_vision { + +Mat g_color_sequence; + +PcdColorizer::PcdColorizer(ros::NodeHandle nh, string input_pcd_topic, string output_pcd_topic, string rgb_cam_topic, string rgb_cam_frame) +{ + this->nh = nh; + this->input_pcd_topic = input_pcd_topic; + this->output_pcd_topic = output_pcd_topic; + this->rgb_cam_topic = rgb_cam_topic; + this->rgb_cam_frame = rgb_cam_frame; + + cloud_sub = nh.subscribe(input_pcd_topic, 10, &PcdColorizer::cloud_cb, this); + cloud_pub = nh.advertise(output_pcd_topic, 10, false); + rgb_cam_sub = img_transport.subscribeCamera(rgb_cam_topic, 10, + [this](const sensor_msgs::ImageConstPtr &image_msg_ptr, const sensor_msgs::CameraInfoConstPtr &info_msg_ptr) + { + this->latest_frame_img_msg = image_msg_ptr; + this->latest_frame_info_msg = info_msg_ptr; + if(!_intrinsics_set){ + auto K = info_msg_ptr->K; + cam_intrinsics << K[0], K[1], K[2], + K[3], K[4], K[5], + K[6], K[7], K[8]; + _intrinsics_set = true; + } + } + ); +} + +void PcdColorizer::cloud_cb(const PCD &cloud_msg){ + input_pcd = cloud_msg; + _transform_to_cam(); + _color_pcd(); + cloud_pub.publish(output_pcd); + seq++; +} + +void PcdColorizer::_transform_to_cam(){ + tf::StampedTransform vel_to_cam_tf; + ros::Time vel_cloud_stamp = input_pcd.header.stamp; + if(ros::ok()){ + try{ + // tf_listener.lookupTransform(input_pcd.header.frame_id, rgb_cam_frame, vel_cloud_stamp, vel_to_cam_tf); + tf_listener.lookupTransform(rgb_cam_frame, input_pcd.header.frame_id, vel_cloud_stamp, vel_to_cam_tf); + } + catch(std::exception &e){ + cout << "Unable to look up tf between pcd and rgb camera. 
Error: " << e.what() << endl; + } + } + pcl_ros::transformPointCloud(rgb_cam_frame, vel_to_cam_tf, input_pcd, transformed_pcd); + // pcl_ros::transformPointCloud(rgb_cam_frame, input_pcd, pcd_cam, tf_listener); + +} + +void PcdColorizer::_color_pcd(){ + // confgure output_pcd header + output_pcd = PCD(); + output_pcd.header.stamp = input_pcd.header.stamp; + output_pcd.header.seq = seq; + output_pcd.header.frame_id = rgb_cam_frame; + // cout << "OUT CLOUD frame:" << rgb_cam_frame << endl; + output_pcd.height = 1; + output_pcd.width = input_pcd.width; + output_pcd.is_bigendian = false; + output_pcd.is_dense = false; + + // Create classes to iterate over and modify clouds + sensor_msgs::PointCloud2Iterator iter_x_input{transformed_pcd, "x"}; + sensor_msgs::PointCloud2Modifier output_pcd_modifier{output_pcd}; + output_pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb"); + sensor_msgs::PointCloud2Iterator iter_x{output_pcd, "x"}; + sensor_msgs::PointCloud2Iterator iter_y{output_pcd, "y"}; + sensor_msgs::PointCloud2Iterator iter_z{output_pcd, "z"}; + sensor_msgs::PointCloud2Iterator iter_b(output_pcd, "b"); + sensor_msgs::PointCloud2Iterator iter_g(output_pcd, "g"); + sensor_msgs::PointCloud2Iterator iter_r(output_pcd, "r"); + // cout << "iter_x_output: " << iter_x_output.data_ << endl; + // cout << "iter_rgb_output: " << iter_rgb_output.data_ << endl; + + // Latest frame + Mat latest_frame_mat; + cout << "latest_frame_ptr:" << latest_frame_img_msg << endl; + if(latest_frame_img_msg == nullptr) + return; + // cout << "Getting frame:" << latest_frame_img_msg << endl; + cv_bridge::CvImagePtr input_bridge = cv_bridge::toCvCopy(latest_frame_img_msg, sensor_msgs::image_encodings::BGR8); + // cout << "extracting frame" << endl; + latest_frame_mat = input_bridge->image; + // cout << "There is no god!" 
+  cout << "K: " << cam_intrinsics << endl;
+
+  // lambda for checking if point is visible from camera
+  auto in_view = [=](Eigen::Vector3f v)
+    {return v[0] >= 0 && v[0] < latest_frame_mat.cols && v[1] >= 0 && v[1] < latest_frame_mat.rows;};
+
+  while(iter_x_input != iter_x_input.end())
+  {
+    Eigen::Vector3f xyz;
+    xyz << iter_x_input[0], iter_x_input[1], iter_x_input[2];
+    Eigen::Vector3f hom_img_coordinates;
+    hom_img_coordinates = cam_intrinsics * xyz;  // Project point in cam frame into image plane
+    hom_img_coordinates = hom_img_coordinates / hom_img_coordinates[2];  // normalize homogeneous vector
+    Vec3b rgb;
+    if(in_view(hom_img_coordinates)){
+      // cout << "hom_coords: " << hom_img_coordinates << " cols: " << latest_frame_mat.cols << " rows: "
+      //      << latest_frame_mat.rows << endl;
+      rgb = latest_frame_mat.at<Vec3b>(hom_img_coordinates[1], hom_img_coordinates[0]);
+    }
+    else
+    {
+      rgb[0] = 122;
+      rgb[1] = 122;
+      rgb[2] = 122;
+    }
+
+    // Set the values of the fields of the colored output cloud
+    *iter_x = xyz[0];
+    *iter_y = xyz[1];
+    *iter_z = xyz[2];
+    *iter_b = rgb[0];
+    *iter_g = rgb[1];
+    *iter_r = rgb[2];
+
+    ++iter_x_input;
+    ++iter_x;
+    ++iter_y;
+    ++iter_z;
+    ++iter_r;
+    ++iter_g;
+    ++iter_b;
+  }
+}
+
+} //namespace mil_vision
diff --git a/utils/mil_msgs/CMakeLists.txt b/utils/mil_msgs/CMakeLists.txt new file mode 100644 index 00000000..23439bc5 --- /dev/null +++ b/utils/mil_msgs/CMakeLists.txt @@ -0,0 +1,38 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(mil_msgs)
+find_package(catkin
+  REQUIRED COMPONENTS
+  message_generation
+  message_runtime
+  geometry_msgs
+  actionlib
+  interactive_markers
+  std_msgs
+  actionlib_msgs
+)
+
+add_action_files(FILES
+  MoveTo.action
+)
+
+add_message_files(FILES
+  PoseTwistStamped.msg
+  PoseTwist.msg
+  VelocityMeasurements.msg
+  DepthStamped.msg
+  RangeStamped.msg
+  VelocityMeasurement.msg
+  Acceleration.msg
+)
+
+add_service_files(FILES
+  CameraToLidarTransform.srv
+)
+
+generate_messages(
+  DEPENDENCIES std_msgs actionlib_msgs geometry_msgs
+)
+catkin_package(
+  CATKIN_DEPENDS message_generation message_runtime geometry_msgs actionlib std_msgs actionlib_msgs
+)
diff --git a/utils/mil_msgs/README.txt b/utils/mil_msgs/README.txt new file mode 100644 index 00000000..3aa7b83d --- /dev/null +++ b/utils/mil_msgs/README.txt @@ -0,0 +1,6 @@
+This is the beginning of the mil_msgs ROS package. It is still in the process of being adapted from the legacy uf-common package. Be sure to update old references to uf_common if you find them around our repos.
+
+This package should contain any .msg, .srv, or .action files that are platform agnostic and have the potential to be used by multiple robots.
+
+Note:
+The 'msg_helpers' and 'param_helpers' headers are a better fit for the mil_dev_tools package but I (David Soto) will not be transitioning them at the moment. (Feel free to do so!)
diff --git a/utils/mil_msgs/action/MoveTo.action b/utils/mil_msgs/action/MoveTo.action new file mode 100644 index 00000000..824ca3fe --- /dev/null +++ b/utils/mil_msgs/action/MoveTo.action @@ -0,0 +1,11 @@
+# goal. copies PoseTwistStamped.
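+# A goal can be sent with a standard actionlib client; a sketch (the "moveto"
+# namespace below is illustrative, not fixed by this package, and `pt` stands
+# in for a mil_msgs/PoseTwist filled in by the caller):
+#   client = actionlib.SimpleActionClient('moveto', mil_msgs.msg.MoveToAction)
+#   client.send_goal(mil_msgs.msg.MoveToGoal(posetwist=pt, speed=0.5,
+#                                            linear_tolerance=0.02))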
+Header header +mil_msgs/PoseTwist posetwist +float64 speed +bool uncoordinated # false goes in a straight line, true achieves some components before others +float64 linear_tolerance # distance from goal for result to be sent +float64 angular_tolerance +--- +# result +--- +# feedback diff --git a/utils/mil_msgs/msg/Acceleration.msg b/utils/mil_msgs/msg/Acceleration.msg new file mode 100644 index 00000000..680a69dd --- /dev/null +++ b/utils/mil_msgs/msg/Acceleration.msg @@ -0,0 +1,2 @@ +geometry_msgs/Vector3 linear +geometry_msgs/Vector3 angular diff --git a/utils/mil_msgs/msg/DepthStamped.msg b/utils/mil_msgs/msg/DepthStamped.msg new file mode 100644 index 00000000..87158d40 --- /dev/null +++ b/utils/mil_msgs/msg/DepthStamped.msg @@ -0,0 +1,4 @@ +Header header + +# A depth in meters +float64 depth diff --git a/utils/mil_msgs/msg/PoseTwist.msg b/utils/mil_msgs/msg/PoseTwist.msg new file mode 100644 index 00000000..a66d185a --- /dev/null +++ b/utils/mil_msgs/msg/PoseTwist.msg @@ -0,0 +1,3 @@ +geometry_msgs/Pose pose +geometry_msgs/Twist twist +Acceleration acceleration diff --git a/utils/mil_msgs/msg/PoseTwistStamped.msg b/utils/mil_msgs/msg/PoseTwistStamped.msg new file mode 100644 index 00000000..631c3523 --- /dev/null +++ b/utils/mil_msgs/msg/PoseTwistStamped.msg @@ -0,0 +1,3 @@ +# pose is in header.frame_id. twist is in frame defined by pose (ie. body) +Header header +PoseTwist posetwist diff --git a/utils/mil_msgs/msg/RangeStamped.msg b/utils/mil_msgs/msg/RangeStamped.msg new file mode 100644 index 00000000..a9381076 --- /dev/null +++ b/utils/mil_msgs/msg/RangeStamped.msg @@ -0,0 +1,4 @@ +Header header + +# A range in meters +float64 range diff --git a/utils/mil_msgs/msg/VelocityMeasurement.msg b/utils/mil_msgs/msg/VelocityMeasurement.msg new file mode 100644 index 00000000..ba14a4bf --- /dev/null +++ b/utils/mil_msgs/msg/VelocityMeasurement.msg @@ -0,0 +1,3 @@ +geometry_msgs/Vector3 direction +float64 velocity +float64 correlation diff --git a/utils/mil_msgs/msg/VelocityMeasurements.msg b/utils/mil_msgs/msg/VelocityMeasurements.msg new file mode 100644 index 00000000..1161df02 --- /dev/null +++ b/utils/mil_msgs/msg/VelocityMeasurements.msg @@ -0,0 +1,2 @@ +Header header +VelocityMeasurement[] velocity_measurements diff --git a/utils/mil_msgs/package.xml b/utils/mil_msgs/package.xml new file mode 100644 index 00000000..96494287 --- /dev/null +++ b/utils/mil_msgs/package.xml @@ -0,0 +1,32 @@ + + mil_msgs + 1.0.0 + Common ROS msgs,srvs and actions for MIL repos + Forrest Voight + David Soto + + BSD + + http://github.com/uf-mil/mil_common + Forrest Voight + catkin + + message_runtime + actionlib + std_msgs + message_generation + geometry_msgs + actionlib_msgs + + message_runtime + actionlib + std_msgs + message_generation + geometry_msgs + actionlib_msgs + + + + + + diff --git a/utils/mil_msgs/srv/CameraToLidarTransform.srv b/utils/mil_msgs/srv/CameraToLidarTransform.srv new file mode 100644 index 00000000..152ab55a --- /dev/null +++ b/utils/mil_msgs/srv/CameraToLidarTransform.srv @@ -0,0 +1,12 @@ +Header header #Stamp of time point was seen for tf +geometry_msgs/Point point #X and Y of point in camera frame, Z is ignored +uint16 tolerance #Number of pixels the projected 3D lidar point can be from the target point to be included in the response +--- +bool success #True if at least one point found in lidar and transformed +geometry_msgs/Point[] transformed #Points in 3-D in camera frame if success is true +geometry_msgs/Point closest #3D point that is the closest to the target point when 
transformed and projected
+geometry_msgs/Vector3 normal #Normal unit vector in camera frame estimated from transformed points
+float64 distance #mean z of transformed points
+string error #Describes what went wrong if success is false
+string CLOUD_NOT_FOUND=pointcloud not found
+string NO_POINTS_FOUND=no points
diff --git a/utils/mil_tools/CMakeLists.txt b/utils/mil_tools/CMakeLists.txt new file mode 100644 index 00000000..0b304579 --- /dev/null +++ b/utils/mil_tools/CMakeLists.txt @@ -0,0 +1,50 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(mil_tools)
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -pedantic -Wall -std=c++11")
+
+find_package(catkin REQUIRED COMPONENTS
+  rostest
+  std_msgs
+  rospy
+  roscpp
+  cv_bridge
+  cmake_modules
+)
+
+find_package(Eigen 3 REQUIRED)
+
+catkin_package(
+  DEPENDS Eigen
+  INCLUDE_DIRS
+    include
+  LIBRARIES
+    mil_tools
+  CATKIN_DEPENDS
+    roscpp
+    cv_bridge
+)
+
+# Add folders to be run by python nosetests
+if(CATKIN_ENABLE_TESTING)
+  catkin_add_nosetests(test)
+endif()
+
+catkin_python_setup()
+
+include_directories(
+  include
+  SYSTEM
+  ${catkin_INCLUDE_DIRS}
+  ${Boost_INCLUDE_DIRS}
+  ${Eigen_INCLUDE_DIRS}
+)
+
+add_library(mil_tools
+  src/mil_tools/mil_tools.cpp
+)
+target_include_directories(mil_tools PUBLIC include)
+target_link_libraries(mil_tools ${catkin_LIBRARIES} ${Boost_LIBRARIES})
+
+install(DIRECTORY include/
+        DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION})
diff --git a/utils/mil_tools/include/mil_tools/mil_tools.hpp b/utils/mil_tools/include/mil_tools/mil_tools.hpp new file mode 100644 index 00000000..30a7b63e --- /dev/null +++ b/utils/mil_tools/include/mil_tools/mil_tools.hpp @@ -0,0 +1,29 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace mil_tools{
+
+static const double PI = 3.1415926535897932;
+
+
+// Returns a vector of topic names (std::string) that end with "image_rect_color"
+// if color is false then it looks for those that end in "image_rect"
+std::vector<std::string> getRectifiedImageTopics(bool color = true);
+
+// converts raw string literals to std::string's
+inline std::string operator "" _s(const char* str, size_t /*length*/)
+{
+  return std::string(str);
+}
+
+
+// Sorts contours by curve length
+void sortContours(std::vector<std::vector<cv::Point>>& contour_vec, bool ascending=true);
+
+}
diff --git a/utils/mil_tools/include/mil_tools/msg_helpers.h b/utils/mil_tools/include/mil_tools/msg_helpers.h new file mode 100644 index 00000000..fddbfbfe --- /dev/null +++ b/utils/mil_tools/include/mil_tools/msg_helpers.h @@ -0,0 +1,77 @@
+#pragma once
+
+#include
+#include
+
+namespace mil_tools {
+
+template <typename T>
+inline T make_rgba(double r, double g, double b, double a) {
+  T c;
+  c.r = r;
+  c.g = g;
+  c.b = b;
+  c.a = a;
+  return c;
+}
+
+template <typename T>
+inline T make_xyz(double x, double y, double z) {
+  T p;
+  p.x = x;
+  p.y = y;
+  p.z = z;
+  return p;
+}
+
+template <typename T>
+inline T vec2xyz(Eigen::Vector3d v) {
+  return make_xyz<T>(v(0), v(1), v(2));
+}
+template <typename T>
+inline T vec2xyz(tf::Vector3 v) {
+  return make_xyz<T>(v.x(), v.y(), v.z());
+}
+template <typename T>
+inline Eigen::Vector3d xyz2vec(T m) {
+  return Eigen::Vector3d(m.x, m.y, m.z);
+}
+
+inline Eigen::Vector3d vec2vec(tf::Vector3 v) { return Eigen::Vector3d(v[0], v[1], v[2]); }
+inline tf::Vector3 vec2vec(Eigen::Vector3d v) { return tf::Vector3(v[0], v[1], v[2]); }
+inline Eigen::Quaterniond quat2quat(const tf::Quaternion &q) {
+  return Eigen::Quaterniond(q[3], q[0], q[1], q[2]);
+}
+inline tf::Quaternion quat2quat(const Eigen::Quaterniond &q) {
+  return tf::Quaternion(q.x(), q.y(), q.z(),
q.w()); +} + +template +inline T make_xyzw(double x, double y, double z, double w) { + T q; + q.x = x; + q.y = y; + q.z = z; + q.w = w; + return q; +} + +template +inline T vec2xyzw(Eigen::Vector4d v) { + return vec2xyzw(v(0), v(1), v(2), v(3)); +} +template +inline Eigen::Quaterniond xyzw2quat(T m) { + return Eigen::Quaterniond(m.w, m.x, m.y, m.z); +} +template +inline T vec2wxyz(Eigen::Vector4d v) { + return make_xyzw(v(1), v(2), v(3), v(0)); +} +inline tf::Quaternion vec2quat(Eigen::Vector4d v) { return tf::Quaternion(v[0], v[1], v[2], v[3]); } +template +inline T quat2xyzw(Eigen::Quaterniond q) { + return make_xyzw(q.x(), q.y(), q.z(), q.w()); +} +} + diff --git a/utils/mil_tools/include/mil_tools/param_helpers.h b/utils/mil_tools/include/mil_tools/param_helpers.h new file mode 100644 index 00000000..4e2881a1 --- /dev/null +++ b/utils/mil_tools/include/mil_tools/param_helpers.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include +#include + +#include "msg_helpers.h" + +namespace mil_tools { + +void fail(std::string const &error_string) { throw std::runtime_error(error_string); } +template +void fail(FirstType first, SecondType second, MoreTypes... more) { + std::ostringstream ss; + ss << first << second; + return fail(ss.str(), more...); +} + +template +void require(bool cond, ErrorDescTypes... error_desc) { + if (!cond) { + fail(error_desc...); + } +} + +template +bool _getParam(ros::NodeHandle &nh, const std::string &name, Eigen::Matrix &res) { + XmlRpc::XmlRpcValue my_list; + if (!nh.getParam(name, my_list)) return false; + require(my_list.getType() == XmlRpc::XmlRpcValue::TypeArray, "param ", name, " must be an array"); + if (N != Eigen::Dynamic) { + require(my_list.size() == N, "param ", name, "must have length ", N, " (is ", my_list.size(), + ")"); + } + + res.resize(my_list.size()); + for (int32_t i = 0; i < my_list.size(); i++) { + if (my_list[i].getType() == XmlRpc::XmlRpcValue::TypeDouble) { + res(i) = static_cast(my_list[i]); + } else if (my_list[i].getType() == XmlRpc::XmlRpcValue::TypeInt) { + res(i) = static_cast(my_list[i]); + } else { + fail("param ", name, "[", i, "] is not numeric"); + } + } + return true; +} +template +bool _getParam(ros::NodeHandle &nh, const std::string &name, Eigen::Matrix &res) { + static_assert(N != 0, "doesn't work for N = 0"); + static_assert(M != 1, "wrong template specialization used?"); + + XmlRpc::XmlRpcValue my_list; + if (!nh.getParam(name, my_list)) return false; + require(my_list.getType() == XmlRpc::XmlRpcValue::TypeArray, "param ", name, " must be an array"); + require(my_list.size() >= 1, "param " + name + " must not have zero length"); + if (N != Eigen::Dynamic) { + require(my_list.size() == N, "param ", name, "must have length ", N, " (is ", my_list.size(), + ")"); + } + + require(my_list[0].getType() == XmlRpc::XmlRpcValue::TypeArray, "param ", name, + "[0] must be a list"); + if (M != Eigen::Dynamic) { + require(my_list[0].size() == M, "param ", name, "[0] must have length ", M, " (is ", + my_list[0].size(), ")"); + } + + res.resize(my_list.size(), my_list[0].size()); + for (int32_t i = 0; i < my_list.size(); i++) { + XmlRpc::XmlRpcValue row = my_list[i]; + require(row.getType() == XmlRpc::XmlRpcValue::TypeArray, "param ", name, "[", i, + "] must be a list"); + require(row.size() == my_list[0].size(), "param ", name, "[", i, "]'s length doesn't match"); + for (int32_t j = 0; j < row.size(); j++) { + XmlRpc::XmlRpcValue entry = row[j]; + if (entry.getType() == XmlRpc::XmlRpcValue::TypeDouble) { + res(i, j) = static_cast(entry); 
+ } else if (entry.getType() == XmlRpc::XmlRpcValue::TypeInt) { + res(i, j) = static_cast(entry); + } else { + fail("param ", name, "[", i, ", ", j, "] is not numeric"); + } + } + } + return true; +} +template +bool _getParam(ros::NodeHandle &nh, const std::string &name, T &res) { + return nh.getParam(name, res); +} +template <> +bool _getParam(ros::NodeHandle &nh, const std::string &name, ros::Duration &res) { + double x; + if (!nh.getParam(name, x)) return false; + res = ros::Duration(x); + return true; +} +template <> +bool _getParam(ros::NodeHandle &nh, const std::string &name, unsigned int &res) { + int x; + if (!nh.getParam(name, x)) return false; + if (x < 0) { + fail("param ", name, " must be >= 0"); + } + res = static_cast(x); + return true; +} + +template +T getParam(ros::NodeHandle &nh, std::string name) { + T res; + require(_getParam(nh, name, res), "param ", name, " required"); + return res; +} +template +T getParam(ros::NodeHandle &nh, std::string name, T default_value) { + T res; + if (!_getParam(nh, name, res)) { + return default_value; + } + return res; +} +} + diff --git a/utils/mil_tools/mil_misc_tools/__init__.py b/utils/mil_tools/mil_misc_tools/__init__.py new file mode 100644 index 00000000..fb9f9b64 --- /dev/null +++ b/utils/mil_tools/mil_misc_tools/__init__.py @@ -0,0 +1,2 @@ +from download import * +from text_effects import * diff --git a/utils/mil_tools/mil_misc_tools/download.py b/utils/mil_tools/mil_misc_tools/download.py new file mode 100644 index 00000000..988d9567 --- /dev/null +++ b/utils/mil_tools/mil_misc_tools/download.py @@ -0,0 +1,57 @@ +import urllib2 +import os +import zipfile +import cStringIO as StringIO + +''' +This file contains utilities for downloading a file from the internet +We're using this because I don't want to track 20MB files in Git. 
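+
+A minimal usage sketch (the URL and destination below are hypothetical):
+
+    from mil_misc_tools import download_and_unzip
+    download_and_unzip('http://example.com/sub_model.zip', '/tmp/models')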
+
+[1] Extracting zipfiles
+    http://stackoverflow.com/questions/9431918
+
+[2] Unzip binary directly
+    http://stackoverflow.com/questions/18966672
+
+[3] Download a file via http
+    http://stackoverflow.com/questions/22676
+'''
+
+
+def download_and_unzip(url, output_dir):
+    '''Download and unzip a file at $url,
+    then put the contained files at $output_dir
+    '''
+    try:
+        html = download(url)
+    except:
+        raise IOError("Could not load file at {}".format(url))
+
+    fake_file = StringIO.StringIO(html)
+
+    zip_ = zipfile.ZipFile(fake_file, "r")
+    for file_path in zip_.namelist():
+        path, file_name = os.path.split(file_path)
+        file_like = zip_.open(file_path)
+
+        f = open(os.path.join(output_dir, file_name), 'w')
+        f.write(file_like.read())
+        f.close()
+
+
+def download(url, output_filename=None):
+    '''Download a file at $url, and return the html
+    If you set an output location, it will also write the file
+    '''
+    response = urllib2.urlopen(url)
+    html = response.read()
+    if output_filename is not None:
+        f = open(output_filename, 'w')
+        f.write(html)
+        f.close()
+    return html
+
+
+if __name__ == '__main__':
+    sub_model_url = "http://goo.gl/f0ennf?gdriveurl"
+    download_and_unzip(sub_model_url, '.')
diff --git a/utils/mil_tools/mil_misc_tools/text_effects.py b/utils/mil_tools/mil_misc_tools/text_effects.py new file mode 100644 index 00000000..15e6121d --- /dev/null +++ b/utils/mil_tools/mil_misc_tools/text_effects.py @@ -0,0 +1,146 @@
+class Colors():
+    red = '\033[31m'
+    green = '\033[32m'
+    yellow = '\033[33m'
+    blue = '\033[34m'
+    purple = '\033[35m'
+    cyan = '\033[36m'
+    white = '\033[37m'
+
+    underline = '\033[2m'
+    bold = '\033[1m'
+    negative = '\033[3m'
+
+    reset = '\033[0m'
+
+    def __getattr__(self, arg):
+        # If we get a non existent color, return the reset color
+        return self.reset
+
+
+class Printer(object):
+    def __init__(self, string="", autospace=False):
+        self._autospace = autospace
+        self._string = string
+
+        # Default adding text
+        self.text = lambda text: self + text
+
+        # Colors
+        self.red = lambda text: self + (Colors.red + str(text) + Colors.reset)
+        self.green = lambda text: self + (Colors.green + str(text) + Colors.reset)
+        self.yellow = lambda text: self + (Colors.yellow + str(text) + Colors.reset)
+        self.blue = lambda text: self + (Colors.blue + str(text) + Colors.reset)
+        self.purple = lambda text: self + (Colors.purple + str(text) + Colors.reset)
+        self.cyan = lambda text: self + (Colors.cyan + str(text) + Colors.reset)
+        self.white = lambda text: self + (Colors.white + str(text) + Colors.reset)
+
+        # Text effects
+        self.underline = lambda text: Printer(self._string + Colors.underline + str(text) + Colors.reset)
+        self.bold = lambda text: Printer(self._string + Colors.bold + str(text) + Colors.reset)
+        self.negative = lambda text: Printer(self._string + Colors.negative + str(text) + Colors.reset)
+
+        # For passing in custom formatting
+        self.custom = lambda text, effect: self + (effect + str(text) + Colors.reset)
+
+    def __repr__(self):
+        return self._string + Colors.reset
+    __str__ = __repr__
+
+    def __add__(self, other):
+        extra_space = ' ' if self._autospace and self._string != '' else ''
+        return Printer(self._string + extra_space + str(other), self._autospace)
+
+    @property
+    def set_red(self):
+        return Printer(self._string + Colors.red)
+
+    @property
+    def set_green(self):
+        return Printer(self._string + Colors.green)
+
+    @property
+    def set_yellow(self):
+        return Printer(self._string + Colors.yellow)
+
+    @property
+    def set_blue(self):
+        return Printer(self._string + Colors.blue)
+
+    @property
+    def set_purple(self):
+        return Printer(self._string + Colors.purple)
+
+    @property
+    def set_cyan(self):
+        return Printer(self._string + Colors.cyan)
+
+    @property
+    def set_white(self):
+        return Printer(self._string + Colors.white)
+
+    @property
+    def reset(self):
+        return Printer(self._string + Colors.reset)
+
+    def space(self, count=1):
+        return Printer(self._string + ' ' * count)
+
+    def newline(self, count=1):
+        return Printer(self._string + '\n' * count)
+
+    def enable_autospaces(self):
+        self._autospace = True
+
+    def disable_autospaces(self):
+        self._autospace = False
+
+
+class FprintFactory(object):
+    def __init__(self, title=None, time=None, msg_color=None, auto_bold=True, newline=1):
+        assert time is None or callable(time), "`time` should be `None` for no printing or a function that generates a timestamp."
+        assert msg_color is None or isinstance(msg_color, str), "`msg_color` should be `None` for default printing or a string color."
+        assert isinstance(auto_bold, bool), "`auto_bold` should be True or False: whether messages are printed bold by default"
+        assert newline is None or isinstance(newline, int), "`newline` should be the number of newlines after the text (default 1)"
+
+        # All these can be overwritten if not specified here
+        self.title = title  # Title to print with each message
+        self.time = time  # Either `None` for no printing or a function that generates a timestamp
+        self.msg_color = msg_color  # Either `None` for default printing or a string color
+        self.auto_bold = auto_bold  # Should each message be bolded by default
+        self.newline = newline  # The number of newline characters to add to the end
+
+        self.printer = Printer()
+
+    def fprint(self, text, **kwargs):
+        title = kwargs.get("title", self.title)
+        time = kwargs.get("time", self.time)
+        msg_color = kwargs.get("msg_color", self.msg_color)
+        auto_bold = kwargs.get("auto_bold", self.auto_bold)
+        newline = kwargs.get("newline", self.newline)
+
+        message = self.printer
+        if title is not None:
+            message = message.set_blue.bold(title).reset.space()
+
+        if time is not None:
+            t = time()
+            message = message.bold(t).space()
+
+        message += ": "
+
+        if auto_bold:
+            text = str(self.printer.bold(text))
+
+        if msg_color is not None:
+            message = message.custom(text, getattr(Colors, msg_color))
+        else:
+            message = message.text(text)
+
+        if newline == 1:
+            print message
+        else:
+            print message.newline(newline - 1)
+
+# Standard instantiation
+fprint = FprintFactory().fprint
diff --git a/utils/mil_tools/mil_ros_tools/__init__.py b/utils/mil_tools/mil_ros_tools/__init__.py new file mode 100644 index 00000000..b52b2b05 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/__init__.py @@ -0,0 +1,8 @@
+from init_helpers import *
+from image_helpers import *
+from msg_helpers import *
+from threading_helpers import *
+from geometry_helpers import *
+from rviz_helpers import *
+from cv_debug import *
+from bag_crawler import *
diff --git a/utils/mil_tools/mil_ros_tools/bag_crawler.py b/utils/mil_tools/mil_ros_tools/bag_crawler.py new file mode 100644 index 00000000..0ec37b79 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/bag_crawler.py @@ -0,0 +1,71 @@
+
+#!/usr/bin/python
+"""
+This file was written by the team at UF MIL for the 2016 robosub competition.
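+
+A minimal usage sketch (assumes ./test.bag contains at least one
+sensor_msgs/Image topic; `convert` turns each message into a bgr8 numpy image):
+
+    bc = BagCrawler('test.bag')
+    for msg in bc.crawl(topic=bc.image_topics[0], max_msgs=10):
+        img = bc.convert(msg)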
+ +github.com/uf-mil +""" +import rosbag +from cv_bridge import CvBridge +import tqdm + + +class BagCrawler(object): + + def __init__(self, bag_path): + self.bag_path = bag_path + self.bag = rosbag.Bag(self.bag_path) + self.bridge = CvBridge() + + def convert(self, msg): + img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8') + return img + + def crawl(self, topic=None, is_image=False, max_msgs=float('inf')): + num_seen = 0 + num_msgs = 0 + bar = tqdm.tqdm(total=self.bag.get_message_count()) + if is_image: + topic = self.image_topics[0] + for msg_topic, msg, t in self.bag.read_messages(): + bar.update(num_msgs) + num_msgs += 1 + if msg_topic != topic: + continue + if num_seen > max_msgs: + break + num_seen += 1 + yield msg + bar.close() + + @property + def image_topics(self, cam="right"): + all_topics = self.bag.get_type_and_topic_info()[1].keys() + all_types = self.bag.get_type_and_topic_info()[1].values() + topics = [all_topics[k] for k, topic in enumerate(all_topics) if (all_types[k][0] == 'sensor_msgs/Image')] + if cam == "right": + topics = [topics[i] for i, t in enumerate(topics) if "right" in t] + if cam == "left": + topics = [topics[i] for i, t in enumerate(topics) if "left" in t] + return topics + + @property + def image_info_topics(self, cam="right"): + all_topics = self.bag.get_type_and_topic_info()[1].keys() + all_types = self.bag.get_type_and_topic_info()[1].values() + topics = [all_topics[k] for k, topic in enumerate(all_topics) if (all_types[k][0] == 'sensor_msgs/CameraInfo')] + if cam == "right": + topics = [topics[i] for i, t in enumerate(topics) if "right" in t] + if cam == "left": + topics = [topics[i] for i, t in enumerate(topics) if "left" in t] + return topics + +if __name__ == '__main__': + import cv2 + + bag = 'test.bag' + bc = BagCrawler(bag) + + for image in bc.crawl(topic=bc.image_topics[0]): + cv2.imshow('current_image', image) + cv2.waitKey(3) diff --git a/utils/mil_tools/mil_ros_tools/cv_debug.py b/utils/mil_tools/mil_ros_tools/cv_debug.py new file mode 100644 index 00000000..56962e5f --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/cv_debug.py @@ -0,0 +1,108 @@ +"""Shows images for debugging purposes.""" +import cv2 +import numpy as np +from sensor_msgs.msg import Image +from cv_bridge import CvBridge +import rospy +import sys +___author___ = "Tess Bianchi" + + +class CvDebug(object): + """Class that contains methods that assist with debugging with images.""" + + def __init__(self, nh=None, w=1000, h=800, total=8, win_name="debug", wait=True): + """ + Initialize the Debug class. 
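+
+        A minimal usage sketch (no node handle, so images are shown with
+        cv2.imshow; `frame` is a placeholder for any bgr8 numpy image):
+
+            debug = CvDebug(wait=False)
+            debug.add_image(frame, 'raw')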
+ + @param w = The width of the image that smaller images are added to + @param h = The height of the image that smaller images are added to + @param win_name = the name of the window that is shown in opencv + @param wait = whether or not to wait after showing the image + """ + self.width = w + self.height = h + self.img = np.zeros((h, w, 3), np.uint8) + self.total = total + self.hor_num = total / 2 + self.vert_num = 2 + self.max_width = w / self.hor_num + self.max_height = h / self.vert_num + self.wait = wait + self.nh = nh + + self.curr_w = 0 + self.curr_h = 0 + self.num_imgs = 0 + self.win_name = win_name + self.name_to_starting = {} + self.bridge = CvBridge() + self.base_topic = "/debug/" + self.topic_to_pub = {} + if nh is None: + self.pub = rospy.Publisher("/debug/image", Image, queue_size=10) + else: + self.pub = nh.advertise("/debug/image", Image) + + def add_image(self, img, name, wait=33, topic="image"): + """ + Add an image to show to either with a topic or using cv2.imshow. + + @param name = a unique key name for the image, use the same name if you want to switch out this image for another + @param wait = the amount of wait time for the imshow image + """ + color = "bgr8" + print img.shape + if len(img.shape) == 2 or img.shape[2] == 1: + color = "mono8" + + if topic != "image": + self._add_new_topic(img, name, wait, topic) + return + if self.wait: + wait = 0 + h, w = img.shape[0], img.shape[1] + + if w > h: + img = cv2.resize(img, (self.max_width, h * self.max_width / w)) + + if h > w: + img = cv2.resize(img, (w * self.max_height / h, self.max_height)) + h, w = img.shape[0], img.shape[1] + if name not in self.name_to_starting: + if self.num_imgs == self.total: + print "Too many images" + return + self.name_to_starting[name] = (self.curr_w, self.curr_h) + self.num_imgs += 1 + + self.curr_w += w + if self.num_imgs == self.total / 2: + self.curr_w = 0 + self.curr_h = self.max_height + if self.num_imgs > self.total / 2: + self.name_to_starting[name] = (self.curr_w, self.curr_h) + my_w, my_h = self.name_to_starting[name] + self.img[my_h: my_h + h, my_w: my_w + w] = img + if self.nh is None: + cv2.imshow("img", self.img) + if cv2.waitKey(wait) & 0xFF == ord('q'): + cv2.destroyAllWindows() + sys.exit() + + else: + self.pub.publish(self.bridge.cv2_to_imgmsg(self.img, color)) + + def _add_new_topic(self, img, name, wait, topic): + color = "bgr8" + if len(img.shape) == 2 or img.shape[2] == 1: + color = "mono8" + pub = None + if topic in self.topic_to_pub.keys(): + pub = self.topic_to_pub[topic] + elif self.nh is None: + pub = rospy.Publisher("/debug/" + topic, Image, queue_size=10) + elif self.nh is not None: + pub = self.nh.advertise("/debug/" + topic, Image) + self.topic_to_pub[topic] = pub + pub.publish(self.bridge.cv2_to_imgmsg(img, color)) diff --git a/utils/mil_tools/mil_ros_tools/fix_bag.py b/utils/mil_tools/mil_ros_tools/fix_bag.py new file mode 100755 index 00000000..2fafdbe1 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/fix_bag.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +import argparse +import rosbag +import rospy +from sensor_msgs.msg import CameraInfo +from roslib.message import get_message_class + +class BagFixer(): + ''' + Dictionary of topics to remap. 
If ends in /, remaps everything after
+    Otherwise, topic must match exactly
+    '''
+    def fix_topic(self, topic):
+        for k, t in self.topic_map.iteritems():
+            if topic.find(k) == 0:
+                return t + topic[len(k):]
+        return topic
+
+    def fix_tf(self, tf):
+        for i, t in enumerate(tf.transforms):
+            tf.transforms[i].header.frame_id = self.fix_frame(tf.transforms[i].header.frame_id)
+            tf.transforms[i].child_frame_id = self.fix_frame(tf.transforms[i].child_frame_id)
+        return tf
+
+    def fix_frame(self, frame):
+        fixed_frame = self.frame_map.get(frame)
+        return fixed_frame if fixed_frame is not None else frame
+
+    def fix_bag(self, infile, outfile=None):
+        if outfile is None:
+            split = infile.rsplit('.bag', 1)
+            outfile = split[0] + '_fixed.bag'
+        out = rosbag.Bag(outfile, 'w')
+        bag = rosbag.Bag(infile)
+        for topic, msg, time in bag.read_messages():
+            topic = self.fix_topic(topic)
+            if hasattr(msg, 'header') and msg.header._type == 'std_msgs/Header':
+                msg.header.frame_id = self.fix_frame(msg.header.frame_id)
+            if msg._type == 'tf2_msgs/TFMessage' or msg._type == 'tf/tfMessage':
+                msg = self.fix_tf(msg)
+            out.write(topic, msg, time)
+        out.flush()
+
+    def fix_strings(self, strings):
+        if strings is None:
+            return {}
+        if type(strings) == dict:
+            return strings
+        assert type(strings) == list
+        d = {}
+        for s in strings:
+            split = s.split(':')
+            assert len(split) == 2
+            d[split[0]] = split[1]
+        return d
+
+    def __init__(self, topic_map={}, frame_map={}):
+        self.topic_map = self.fix_strings(topic_map)
+        self.frame_map = self.fix_strings(frame_map)
+        print self.topic_map
+        print self.frame_map
+        #self.fix_bag()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Fix bag topics/frame_ids')
+    parser.add_argument('--in', '-i', dest='infile', type=str, required=True,
+                        help='Bag to read and fix')
+    parser.add_argument('--out', '-o', type=str, dest='outfile',
+                        help='Bag to output with fixed content, defaults to INPUTFILENAME_fixed.bag')
+    parser.add_argument('--topics', required=False, nargs='+', type=str, metavar='oldtopic:newtopic',
+                        help="list of topics to remap, separated by a colon, ex /down_cam/:/camera/down/ /my_odom:/odom\
+                        If it ends in a slash (ex: /cameras/:/cams/), all topics after the slash will be remapped")
+    parser.add_argument('--frames', required=False, nargs='+', type=str, metavar='oldframe:newframe',
+                        help="list of frame_ids in headers to be remapped, ex: my_camera:cam_front")
+    args = parser.parse_args()
+    fixer = BagFixer(args.topics, args.frames)
+    fixer.fix_bag(args.infile, args.outfile)
+    #fixer = BagFixer(args.inbag, args.outbag)
diff --git a/utils/mil_tools/mil_ros_tools/func_helpers.py b/utils/mil_tools/mil_ros_tools/func_helpers.py new file mode 100644 index 00000000..8b2fc4c1 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/func_helpers.py @@ -0,0 +1,38 @@
+class Cache(object):
+    """No support for **kwargs**"""
+    def __init__(self, func):
+        self.call_dict = {}
+        self.func = func
+
+    def __call__(self, *args):
+        if args not in self.call_dict.keys():
+            result = self.func(*args)
+            self.call_dict[args] = result
+        else:
+            result = self.call_dict[args]
+        return result
+
+
+@Cache
+def add(a, b):
+    print 'adding', a, b
+    return a + b
+
+
+@Cache
+def gooberstein(data):
+    print "Being called"
+    return data + "balls"
+
+
+if __name__ == '__main__':
+
+    print add(1, 2)
+    print add(1, 2)
+
+    print add(2, 3)
+    print add(2, 3)
+
+    print gooberstein("hello")
+    print gooberstein("hello")
+    print gooberstein("hello")
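The Cache decorator above memoizes on positional arguments only, and its
membership test (`args not in self.call_dict.keys()`) builds a list on every
call under Python 2, where testing the dict directly is constant time. A sketch
of a kwargs-aware variant (our own illustration, not part of this package),
assuming every argument is hashable:

    class KwargsCache(object):
        """Memoize on positional and keyword arguments (all must be hashable)."""
        def __init__(self, func):
            self.call_dict = {}
            self.func = func

        def __call__(self, *args, **kwargs):
            # Sort kwargs so {'a': 1, 'b': 2} and {'b': 2, 'a': 1} share a key
            key = (args, tuple(sorted(kwargs.items())))
            if key not in self.call_dict:
                self.call_dict[key] = self.func(*args, **kwargs)
            return self.call_dict[key]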
diff --git a/utils/mil_tools/mil_ros_tools/geometry_helpers.py b/utils/mil_tools/mil_ros_tools/geometry_helpers.py new file mode 100644 index 00000000..f8fdd962 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/geometry_helpers.py @@ -0,0 +1,141 @@
+from __future__ import division
+import numpy as np
+from tf import transformations
+from tf.transformations import quaternion_from_euler, euler_from_quaternion, random_quaternion
+from msg_helpers import numpy_quat_pair_to_pose
+from geometry_msgs.msg import Quaternion
+
+
+def rotate_vect_by_quat(v, q):
+    '''
+    Rotate a vector by a quaternion.
+    v' = q*vq
+
+    q should be [x,y,z,w] (standard ros convention)
+    '''
+    cq = np.array([-q[0], -q[1], -q[2], q[3]])
+    cq_v = transformations.quaternion_multiply(cq, v)
+    v = transformations.quaternion_multiply(cq_v, q)
+    v[1:] *= -1  # Only seemed to work when I flipped this around, is there a problem with the math here?
+    return np.array(v)[:3]
+
+
+def make_rotation(vector_a, vector_b):
+    '''Determine a 3D rotation that rotates A onto B
+    In other words, we want a matrix R that aligns a with b
+
+    >> R = make_rotation(a, b)
+    >> p = R.dot(a)
+    >> np.cross(p, b)
+    >>> array([0.0, 0.0, 0.0])
+
+    [1] Calculate Rotation Matrix to align Vector A to Vector B in 3d?
+        http://math.stackexchange.com/questions/180418
+    [2] N. Ho, Finding Optimal Rotation...Between Corresponding 3D Points
+        http://nghiaho.com/?page_id=671
+    '''
+    unit_a = normalize(vector_a)
+    unit_b = normalize(vector_b)
+
+    v = np.cross(unit_a, unit_b)
+    s = np.linalg.norm(v)
+
+    c = np.dot(unit_a, unit_b)
+
+    skew_cross = skew_symmetric_cross(v)
+    skew_squared = np.linalg.matrix_power(skew_cross, 2)
+
+    if np.isclose(c, 1.0, atol=1e-4):
+        R = np.eye(3)
+        return R
+    elif np.isclose(c, -1.0, atol=1e-4):
+        R = np.eye(3)
+        R[2, 2] *= -1
+        return R
+
+    normalization = (1 - c) / (s ** 2)
+
+    R = np.eye(3) + skew_cross + (skew_squared * normalization)
+
+    # Address the reflection case
+    if np.linalg.det(R) < 0:
+        R[:, 2] *= -1
+
+    return R
+
+
+def skew_symmetric_cross(a):
+    '''Return the skew symmetric matrix representation of a vector
+    [1] https://en.wikipedia.org/wiki/Cross_product#Skew-symmetric_matrix
+    '''
+    assert len(a) == 3, "a must be in R3"
+    skew_symm = np.array([
+        [+0.00, -a[2], +a[1]],
+        [+a[2], +0.00, -a[0]],
+        [-a[1], +a[0], +0.00],
+    ], dtype=np.float32)
+    return skew_symm
+
+
+def deskew(A):
+    return np.array([A[2, 1], A[0, 2], A[1, 0]], dtype=np.float32)
+
+
+def normalize(vector):
+    return vector / np.linalg.norm(vector)
+
+
+def compose_transformation(R, t):
+    '''Compose a transformation from a rotation matrix and a translation matrix'''
+    transformation = np.zeros((4, 4))
+    transformation[:3, :3] = R
+    transformation[3, :3] = t
+    transformation[3, 3] = 1.0
+    return transformation
+
+
+def project_pt_to_plane(point, plane_normal):
+    dist = np.dot(plane_normal, point)
+    projected = point - (dist * plane_normal)
+    return projected
+
+
+def clip_norm(vector, lower_bound, upper_bound):
+    '''Return a vector pointing the same direction as $vector,
+    with its norm clipped to [$lower_bound, $upper_bound];
+    if norm(vector) is already within the bounds, return vector unchanged
+
+    Like np.clip, but for vector norms
+    '''
+    norm = np.linalg.norm(vector)
+    if lower_bound < norm < upper_bound:
+        return np.copy(vector)
+    if norm < lower_bound:
+        v_new = (vector * lower_bound) / norm
+    else:
+        v_new = (vector * upper_bound) / norm
+    return v_new
+
+
+def quaternion_matrix(q):
+    mat_h = transformations.quaternion_matrix(q)
+    return mat_h[:3, :3] / mat_h[3, 3]
+
+def quat_to_euler(q):
+    ''' Approximate a quaternion as a euler rotation vector'''
+
+    euler_rot_vec =
euler_from_quaternion([q.x, q.y, q.z, q.w]) + final = np.array(([euler_rot_vec[0], euler_rot_vec[1], euler_rot_vec[2]])) + return final + + +def euler_to_quat(rotvec): + ''' convert a euler rotation vector into a ROS quaternion ''' + + quat = quaternion_from_euler(rotvec[0], rotvec[1], rotvec[2]) + return Quaternion(quat[0], quat[1], quat[2], quat[3]) + +def random_pose(_min, _max): + ''' Gives a random pose in the xyz range `_min` to `_max` ''' + pos = np.random.uniform(low=_min, high=_max, size=3) + quat = random_quaternion() + return numpy_quat_pair_to_pose(pos, quat) diff --git a/utils/mil_tools/mil_ros_tools/image_helpers.py b/utils/mil_tools/mil_ros_tools/image_helpers.py new file mode 100644 index 00000000..d40b2fe7 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/image_helpers.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +''' +Note: + The repeated use of CvBridge (instead of using make_image_msg and get_image_msg in the classes) + is intentional, to avoid the use of a global cvbridge, and to avoid reinstantiating a CvBrige for each use. +''' +import rospy +import numpy as np +from os import path +from cv_bridge import CvBridge, CvBridgeError +from sensor_msgs.msg import Image, CameraInfo +from mil_ros_tools import wait_for_param + + +def get_parameter_range(parameter_root): + ''' + ex: parameter_root='/vision/buoy/red' + this will then fetch /vision/buoy/red/hsv_low and hsv_high + ''' + low_param, high_param = parameter_root + '/hsv_low', parameter_root + '/hsv_high' + + rospy.logwarn("Blocking -- waiting for parameters {} and {}".format(low_param, high_param)) + + wait_for_param(low_param) + wait_for_param(high_param) + low = rospy.get_param(low_param) + high = rospy.get_param(high_param) + + rospy.loginfo("Got {} and {}".format(low_param, high_param)) + return np.array([low, high]).transpose() + + +def make_image_msg(cv_image, encoding='bgr8'): + '''Take a cv image, and produce a ROS image message''' + bridge = CvBridge() + image_message = bridge.cv2_to_imgmsg(cv_image, encoding) + return image_message + + +def get_image_msg(ros_image, encoding='bgr8'): + '''Take a ros image message, and yield an opencv image''' + bridge = CvBridge() + cv_image = bridge.imgmsg_to_cv2(ros_image, desired_encoding=encoding) + return cv_image + + +class Image_Publisher(object): + def __init__(self, topic, encoding="bgr8", queue_size=1): + '''Create an essentially normal publisher, that will publish images without conversion hassle''' + self.bridge = CvBridge() + self.encoding = encoding + self.im_pub = rospy.Publisher(topic, Image, queue_size=queue_size) + + def publish(self, cv_image): + try: + image_message = self.bridge.cv2_to_imgmsg(cv_image, self.encoding) + self.im_pub.publish(image_message) + except CvBridgeError, e: + # Intentionally absorb CvBridge Errors + rospy.logerr(e) + + +class Image_Subscriber(object): + def __init__(self, topic, callback=None, encoding="bgr8", queue_size=1): + '''Calls $callback on each image every time a new image is published on $topic + Assumes topic of type "sensor_msgs/Image" + This behaves like a conventional subscriber, except handling the additional image conversion + ''' + if callback is None: + callback = lambda im: setattr(self, 'last_image', im) + + self.encoding = encoding + self.camera_info = None + self.last_image_time = None + self.last_image = None + self.im_sub = rospy.Subscriber(topic, Image, self.convert, queue_size=queue_size) + + root_topic, image_subtopic = path.split(topic) + self.info_sub = rospy.Subscriber(root_topic + '/camera_info', 
CameraInfo, self.info_cb, queue_size=queue_size) + + self.bridge = CvBridge() + self.callback = callback + + def wait_for_camera_info(self, timeout=10): + ''' + Blocks until camera info has been received. + Note: 'timeout' is in seconds. + ''' + rospy.logwarn("Blocking -- waiting at most %d seconds for camera info." % timeout) + + timeout = rospy.Duration(timeout) + rospy.sleep(.1) # Make sure we don't have negative time + start_time = rospy.Time.now() + + while (rospy.Time.now() - start_time < timeout) and (not rospy.is_shutdown()): + if self.camera_info is not None: + rospy.loginfo("Camera info found!") + return self.camera_info + rospy.sleep(.2) + + rospy.logerr("Camera info not found.") + raise Exception("Camera info not found.") + + def info_cb(self, msg): + """The path trick here is a hack""" + self.info_sub.unregister() + self.camera_info = msg + + def convert(self, data): + self.last_image_time = data.header.stamp + try: + image = self.bridge.imgmsg_to_cv2(data, desired_encoding=self.encoding) + self.callback(image) + except CvBridgeError, e: + # Intentionally absorb CvBridge Errors + rospy.logerr(e) diff --git a/utils/mil_tools/mil_ros_tools/init_helpers.py b/utils/mil_tools/mil_ros_tools/init_helpers.py new file mode 100644 index 00000000..75441670 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/init_helpers.py @@ -0,0 +1,54 @@ +import rospy +import rostest +import time + + +def wait_for_param(param_name, timeout=None, poll_rate=0.1): + '''Blocking wait for a parameter named $parameter_name to exist + Poll at frequency $poll_rate + Once the parameter exists, return get and return it. + + This function intentionally leaves failure logging duties to the developer + ''' + start_time = time.time() + rate = rospy.Rate(poll_rate) + while not rospy.is_shutdown(): + + # Check if the parameter now exists + if rospy.has_param(param_name): + return rospy.get_param(param_name) + + # If we exceed a defined timeout, return None + if timeout is not None: + if time.time() - start_time > timeout: + return None + + # Continue to poll at poll_rate + rate.sleep() + + +def wait_for_subscriber(node_name, topic, timeout=5.0): + '''Blocks until $node_name subscribes to $topic + Useful mostly in integration tests -- + I would counsel against use elsewhere + ''' + end_time = time.time() + timeout + + # Wait for time-out or ros-shutdown + while (time.time() < end_time) and (not rospy.is_shutdown()): + subscribed = rostest.is_subscriber( + rospy.resolve_name(topic), + rospy.resolve_name(node_name) + ) + # Success scenario: node subscribes + if subscribed: + break + time.sleep(0.1) + + # Could do this with a while/else + # But chose to explicitly check + success = rostest.is_subscriber( + rospy.resolve_name(topic), + rospy.resolve_name(node_name) + ) + return success diff --git a/utils/mil_tools/mil_ros_tools/msg_helpers.py b/utils/mil_tools/mil_ros_tools/msg_helpers.py new file mode 100644 index 00000000..15cca0b1 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/msg_helpers.py @@ -0,0 +1,194 @@ +import numpy as np +from tf import transformations +import geometry_msgs.msg as geometry_msgs +import std_msgs.msg as std_msgs +import nav_msgs.msg as nav_msgs +import rospy + + +def rosmsg_to_numpy(rosmsg, keys=None): + '''Convert an arbitrary ROS msg to a numpy array + With no additional arguments, it will by default handle: + Point2D, Point3D, Vector3D, and quaternions + + Ex: + quat = Quaternion(1.0, 0.0, 0.0, 0.0) + quat is not a vector, you have quat.x, quat.y,... 
and you can't do math on that + + But wait, there's hope! + rosmsg_to_numpy(quat) -> array([1.0, 0.0, 0.0, 0.0]) + + Yielding a vector, which you can do math on! + + Further, imagine a bounding box message, BB, with properties BB.x, BB.h, BB.y, and BB.w + + rosmsg_to_numpy(BB, ['x', 'h', 'y', 'w']) -> array([BB.x, BB.h, BB.y, BB.w]) + + or... + rosmsg_to_numpy(some_Pose2D, ['x', 'y', 'yaw']) = array([x, y, yaw]) + + Note: + - This function is designed to handle the most common use cases (vectors, points and quaternions) + without requiring any additional arguments. + ''' + if keys is None: + keys = ['x', 'y', 'z', 'w'] + output_array = [] + for key in keys: + # This is not necessarily the fastest way to do this + if hasattr(rosmsg, key): + output_array.append(getattr(rosmsg, key)) + else: + break + assert len(output_array) is not 0, "Input type {} has none of these attributes {}.".format(type(rosmsg).__name__, keys) + return np.array(output_array).astype(np.float32) + + else: + output_array = np.zeros(len(keys), np.float32) + for n, key in enumerate(keys): + output_array[n] = getattr(rosmsg, key) + + return output_array + + +def pose_to_numpy(pose): + '''TODO: Unit-test + returns (position, orientation) + ''' + position = rosmsg_to_numpy(pose.position) + orientation = rosmsg_to_numpy(pose.orientation) + return position, orientation + + +def twist_to_numpy(twist): + '''TODO: Unit-test + Convert a twist message into a tuple of numpy arrays + returns (linear, angular) + ''' + linear = rosmsg_to_numpy(twist.linear) + angular = rosmsg_to_numpy(twist.angular) + return linear, angular + + +def posetwist_to_numpy(posetwist): + pose = pose_to_numpy(posetwist.pose) + twist = twist_to_numpy(posetwist.twist) + return pose, twist + + +def odometry_to_numpy(odom): + '''TODO: Unit-test + Convert an odometry message into a tuple of numpy arrays + returns (pose, twist, pose_covariance, twist_covariance) + ''' + pose = pose_to_numpy(odom.pose.pose) + pose_covariance = np.array(odom.pose.covariance).reshape(6, 6) + + twist = twist_to_numpy(odom.twist.twist) + twist_covariance = np.array(odom.twist.covariance).reshape(6, 6) + + return pose, twist, pose_covariance, twist_covariance + + +def wrench_to_numpy(wrench): + force = rosmsg_to_numpy(wrench.force) + torque = rosmsg_to_numpy(wrench.torque) + return force, torque + + +def numpy_to_point(vector): + np_vector = np.array(vector) + if np_vector.size == 2: + np_vector = np.append(np_vector, 0) # Assume user is give 2d point + + return geometry_msgs.Point(*np_vector) + + +def numpy_to_quaternion(np_quaternion): + return geometry_msgs.Quaternion(*np_quaternion) + + +def numpy_to_twist(linear_vel, angular_vel): + '''TODO: Unit-test + ''' + return geometry_msgs.Twist(linear=geometry_msgs.Vector3(*linear_vel), angular=geometry_msgs.Vector3(*angular_vel)) + + +def numpy_to_wrench(forcetorque): + return geometry_msgs.Wrench( + force=geometry_msgs.Vector3(*forcetorque[:3]), + torque=geometry_msgs.Vector3(*forcetorque[3:]) + ) + + +def numpy_matrix_to_quaternion(np_matrix): + '''Given a 3x3 rotation matrix, return its geometry_msgs Quaternion''' + assert np_matrix.shape == (3, 3), "Must submit 3x3 rotation matrix" + pose_mat = np.eye(4) + pose_mat[:3, :3] = np_matrix + np_quaternion = transformations.quaternion_from_matrix(pose_mat) + return geometry_msgs.Quaternion(*np_quaternion) + + +def numpy_pair_to_pose(np_translation, np_rotation_matrix): + '''Convert a R, t pair to a geometry_msgs Pose''' + orientation = numpy_matrix_to_quaternion(np_rotation_matrix) + position 
= numpy_to_point(np_translation) + return geometry_msgs.Pose(position=position, orientation=orientation) + + +def numpy_quat_pair_to_pose(np_translation, np_quaternion): + orientation = numpy_to_quaternion(np_quaternion) + position = numpy_to_point(np_translation) + return geometry_msgs.Pose(position=position, orientation=orientation) + + +def make_header(frame='/body', stamp=None): + if stamp is None: + try: + stamp = rospy.Time.now() + except rospy.ROSInitException: + stamp = rospy.Time(0) + + header = std_msgs.Header( + stamp=stamp, + frame_id=frame + ) + return header + + +def make_wrench_stamped(force, torque, frame='/body'): + '''Make a WrenchStamped message without all the fuss + Frame defaults to body + ''' + wrench = geometry_msgs.WrenchStamped( + header=make_header(frame), + wrench=geometry_msgs.Wrench( + force=geometry_msgs.Vector3(*force), + torque=geometry_msgs.Vector3(*torque) + ) + ) + return wrench + + +def make_pose_stamped(position, orientation, frame='/body'): + wrench = geometry_msgs.WrenchStamped( + header=make_header(frame), + pose=geometry_msgs.Pose( + force=geometry_msgs.Vector3(*position), + orientation=geometry_msgs.Quaternion(*orientation) + ) + ) + return wrench + + +def odom_sub(topic, callback): + def wrapped_callback(*args): + msg = args[-1] + callback(odometry_to_numpy(msg)) + + return rospy.Subscriber(topic, nav_msgs.Odometry, wrapped_callback, queue_size=1) + +def ros_to_np_3D(msg): + xyz_array = np.array(([msg.x, msg.y, msg.z])) + return xyz_array diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/CMakeLists.txt b/utils/mil_tools/mil_ros_tools/online_bagger/CMakeLists.txt new file mode 100644 index 00000000..782eb679 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/online_bagger/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 2.8.3) +project(mil_ros_tools) + +find_package(catkin REQUIRED COMPONENTS + rospy + message_generation + +) +add_service_files( + DIRECTORY srv + FILES + BaggerCommands.srv +) + +generate_messages() +catkin_package() + diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/config/online_bagger_example.yaml b/utils/mil_tools/mil_ros_tools/online_bagger/config/online_bagger_example.yaml new file mode 100644 index 00000000..f88f6d57 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/online_bagger/config/online_bagger_example.yaml @@ -0,0 +1,15 @@ +# list an arbitrary set of topics to subscribe to and stream: + +stream_time: 10 # seconds + +topics: [["/odom", 300] , + ["/absodom", 300], + ["/stereo/left/camera_info" ], + ["/stereo/left/image_raw", 20], + ["/stereo/right/camera_info", 20], + ["/stereo/right/image_raw", 20], + ["/velodyne_points", 300], + ["/ooka", 100]] + +# comment out if default home/user/online_bagger is desired +# bag_package_path: '' \ No newline at end of file diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/launch/online_bagger_example.launch b/utils/mil_tools/mil_ros_tools/online_bagger/launch/online_bagger_example.launch new file mode 100644 index 00000000..f25eba59 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/online_bagger/launch/online_bagger_example.launch @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/nodes/online_bagger.py b/utils/mil_tools/mil_ros_tools/online_bagger/nodes/online_bagger.py new file mode 100755 index 00000000..c64ee881 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/online_bagger/nodes/online_bagger.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python + +from __future__ import division +import rospy 
diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/CMakeLists.txt b/utils/mil_tools/mil_ros_tools/online_bagger/CMakeLists.txt
new file mode 100644
index 00000000..782eb679
--- /dev/null
+++ b/utils/mil_tools/mil_ros_tools/online_bagger/CMakeLists.txt
@@ -0,0 +1,17 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(mil_ros_tools)
+
+find_package(catkin REQUIRED COMPONENTS
+  rospy
+  message_generation
+)
+
+add_service_files(
+  DIRECTORY srv
+  FILES
+  BaggerCommands.srv
+)
+
+generate_messages()
+catkin_package()
diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/config/online_bagger_example.yaml b/utils/mil_tools/mil_ros_tools/online_bagger/config/online_bagger_example.yaml
new file mode 100644
index 00000000..f88f6d57
--- /dev/null
+++ b/utils/mil_tools/mil_ros_tools/online_bagger/config/online_bagger_example.yaml
@@ -0,0 +1,15 @@
+# list an arbitrary set of topics to subscribe to and stream:
+
+stream_time: 10 # seconds
+
+topics: [["/odom", 300],
+         ["/absodom", 300],
+         ["/stereo/left/camera_info"],
+         ["/stereo/left/image_raw", 20],
+         ["/stereo/right/camera_info", 20],
+         ["/stereo/right/image_raw", 20],
+         ["/velodyne_points", 300],
+         ["/ooka", 100]]
+
+# comment out if the default $HOME/online_bagger is desired
+# bag_package_path: ''
\ No newline at end of file
diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/launch/online_bagger_example.launch b/utils/mil_tools/mil_ros_tools/online_bagger/launch/online_bagger_example.launch
new file mode 100644
index 00000000..f25eba59
--- /dev/null
+++ b/utils/mil_tools/mil_ros_tools/online_bagger/launch/online_bagger_example.launch
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/nodes/online_bagger.py b/utils/mil_tools/mil_ros_tools/online_bagger/nodes/online_bagger.py
new file mode 100755
index 00000000..c64ee881
--- /dev/null
+++ b/utils/mil_tools/mil_ros_tools/online_bagger/nodes/online_bagger.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python
+
+from __future__ import division
+import rospy
+import rosbag
+import rostopic
+import os
+import resource
+from collections import deque
+import itertools
+import datetime
+from mil_ros_tools.srv import BaggerCommands
+
+"""
+To dump a bag, call the '/online_bagger/dump' service, defined by
+BaggerCommands.srv (bag_name and bag_time). Doing this will generate a rosbag
+containing the last x seconds of streamed data.
+
+For example:
+
+    bagging_server = rospy.ServiceProxy('/online_bagger/dump', BaggerCommands)
+    bag_status = bagging_server(bag_name, bag_time)
+    bag_name = name of bag (leave blank to use the default name: current date and time)
+        Provide an empty string: '' to use the default
+    bag_time = float32 (leave blank to dump the entire stream):
+        Provide 0.0, or 0, to bag everything
+"""
+
+
+class OnlineBagger(object):
+
+    def __init__(self):
+        """
+        Make a dictionary of deques.
+        Subscribe to the set of topics defined by the yaml file in the config directory.
+        Stream topics up to a given stream time, dumping the oldest messages when the limit is reached.
+        Set up a service to bag n seconds of data, defaulting to all of the available data.
+        """
+
+        self.successful_subscription_count = 0  # successful subscriptions
+        self.iteration_count = 0  # number of iterations
+        self.streaming = True
+        self.get_params()
+        self.make_dicts()
+
+        self.bagging_service = rospy.Service('/online_bagger/dump', BaggerCommands,
+                                             self.start_bagging)
+
+        self.subscribe_loop()
+        rospy.loginfo('subscriber list: %s', self.subscriber_list)
+
+    def get_params(self):
+        """
+        Retrieve parameters from the param server.
+        """
+
+        self.subscriber_list = rospy.get_param('/online_bagger/topics')
+        self.dir = rospy.get_param('/online_bagger/bag_package_path', default=os.environ['HOME'])
+        self.stream_time = rospy.get_param('/online_bagger/stream_time', default=30)  # seconds
+
+        rospy.loginfo('subscriber list: %s', self.subscriber_list)
+        rospy.loginfo('stream_time: %s seconds', self.stream_time)
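+
+    # Illustrative parameter layout (mirrors config/online_bagger_example.yaml):
+    #
+    #     /online_bagger/stream_time: 10        # seconds
+    #     /online_bagger/topics: [['/odom', 300], ['/stereo/left/image_raw', 20]]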
+
+    def make_dicts(self):
+        """
+        Make a dictionary of sliceable deques that will be filled with messages and time stamps.
+
+        The subscriber list contains all of the topics, their stream times and their subscription status:
+            A True status for a given topic corresponds to a successful subscription
+            A False status indicates a failed subscription.
+
+        Stream time for an individual topic is specified in seconds.
+
+        For example:
+            self.subscriber_list[0:2] = [['/odom', 300, False], ['/absodom', 300, True]]
+
+        Indicates that '/odom' has not been subscribed to, but '/absodom' has been subscribed to
+
+        self.topic_messages is a dictionary of deques containing a list of tuples.
+            Dictionary keys contain topic names
+            Each value for each topic contains a deque
+            Each deque contains a list of tuples
+            Each tuple contains a message and its associated time stamp
+
+        For example:
+            '/odom' is a potential topic name
+            self.topic_messages['/odom'] is a deque
+            self.topic_messages['/odom'][0] is the oldest message available in the deque,
+            together with its time stamp. It is a tuple with each element: (time_stamp, msg)
+            self.topic_messages['/odom'][0][0] is the time stamp for the oldest message
+            self.topic_messages['/odom'][0][1] is the message associated with the oldest topic
+        """
+
+        self.topic_messages = {}
+        self.topic_list = []
+
+        class SliceableDeque(deque):
+            def __getitem__(self, index):
+                if isinstance(index, slice):
+                    return type(self)(itertools.islice(self, index.start,
+                                                       index.stop, index.step))
+                return deque.__getitem__(self, index)
+
+        for topic in self.subscriber_list:
+            if len(topic) == 1:
+                topic.append(self.stream_time)
+                rospy.loginfo('no stream time provided, default used for: %s', topic)
+            self.topic_messages[topic[0]] = SliceableDeque(deque())
+            topic.append(False)
+            self.topic_list.append(topic[0])
+
+        rospy.loginfo('topics status: %s', self.subscriber_list)
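+
+    # Illustrative: the SliceableDeque built above accepts (non-negative)
+    # slices, which start_bagging uses to grab the tail of a stream, e.g.
+    #
+    #     self.topic_messages['/odom'][5:]  # everything but the 5 oldest tuples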
+
+    def subscribe_loop(self):
+        """
+        Continue subscribing until at least one topic is successful.
+        bagger_callback re-runs subscribe() afterwards, so the remaining topics
+        can be picked up later without this loop locking up the node.
+        """
+
+        i = 0
+        while self.successful_subscription_count == 0 and not rospy.is_shutdown():
+            self.subscribe()
+            rospy.sleep(0.1)
+            i = i + 1
+            if i % 1000 == 0:
+                rospy.logdebug('still subscribing!')
+
+    def subscribe(self):
+        """
+        Subscribe to the topics defined in the yaml configuration file.
+
+        This function checks the subscription status (True/False) of each topic:
+            if True: the topic has already been successfully subscribed to
+            if False: the topic still needs to be subscribed to, and the
+            subscriber will be run.
+
+        Each element in self.subscriber_list is a list [topic, stream_time, subscribed],
+        where the bool 'subscribed' gives the current status of the subscriber (success/failure).
+        """
+
+        for topic in self.subscriber_list:
+            if not topic[2]:
+                msg_class = rostopic.get_topic_class(topic[0])
+                if msg_class[1] is not None:
+                    self.successful_subscription_count = self.successful_subscription_count + 1
+                    rospy.Subscriber(topic[0], msg_class[0],
+                                     lambda msg, _topic=topic[0]: self.bagger_callback(msg, _topic))
+
+                    topic[2] = True  # successful subscription
+
+    def get_topic_duration(self, topic):
+        """
+        Return the current time duration of a topic.
+        """
+
+        return self.topic_messages[topic][-1][0] - self.topic_messages[topic][0][0]
+
+    def get_header_time(self, msg):
+        """
+        Retrieve the header time if available.
+        """
+
+        if hasattr(msg, 'header'):
+            return msg.header.stamp
+        else:
+            return rospy.get_rostime()
+
+    def get_time_index(self, topic, requested_seconds):
+        """
+        Return the deque index of the message 'n' seconds from the end of the deque for a topic.
+        For example, to bag the last 10 seconds of data, the index for 10 seconds back from the most
+        recent message can be obtained with this function.
+        The number of requested seconds should be the number of seconds desired from
+        the end of the deque (i.e. requested_seconds = 10).
+        If the desired time length of the bag is greater than the available messages, it will output a
+        message and bag however many seconds of data are available at the moment.
+        Seconds is of a number type, not a rospy.Time type (i.e. int, float).
+        """
+
+        topic_duration = self.get_topic_duration(topic).to_sec()
+
+        ratio = requested_seconds / topic_duration
+        index = int(self.get_topic_message_count(topic) * (1 - min(ratio, 1)))
+
+        self.bag_report = 'The requested %s seconds were bagged' % requested_seconds
+
+        if index == 0:
+            self.bag_report = 'Only %s seconds of the requested %s seconds were available, all \
+messages were bagged' % (topic_duration, requested_seconds)
+        return index
+
+    def bagger_callback(self, msg, topic):
+        """
+        Streaming callback function; stops streaming during the bagging process.
+        Also pops msgs off the deque if the stream duration is greater than the specified stream_time.
+
+        The callback does nothing if streaming is not active.
+        """
+
+        if not self.streaming:
+            return
+
+        self.iteration_count = self.iteration_count + 1
+        time = self.get_header_time(msg)
+
+        self.topic_messages[topic].append((time, msg))
+
+        time_diff = self.get_topic_duration(topic)
+
+        # verify streaming is popping off and recording topics
+        if self.iteration_count % 100 == 0:
+            rospy.logdebug('time_difference: %s', time_diff.to_sec())
+            rospy.logdebug('topic: %s', topic)
+            rospy.logdebug('topic type: %s', type(msg))
+            rospy.logdebug('number of topic messages: %s', self.get_topic_message_count(topic))
+
+        index = self.topic_list.index(topic)
+
+        while time_diff > rospy.Duration(self.subscriber_list[index][1]) and not rospy.is_shutdown():
+            self.topic_messages[topic].popleft()
+            time_diff = self.get_topic_duration(topic)
+
+        # re-subscribe to topics that failed earlier, in case they are available now
+        self.subscribe()
+
+    def get_topic_message_count(self, topic):
+        """
+        Return the number of messages available in a topic.
+        """
+
+        return len(self.topic_messages[topic])
+
+    def get_total_message_count(self):
+        """
+        Return the total number of messages across all topics.
+        """
+
+        total_message_count = 0
+        for topic in self.topic_messages.keys():
+            total_message_count = total_message_count + self.get_topic_message_count(topic)
+
+        return total_message_count
+
+    def set_bag_directory(self, bag_name=''):
+        """
+        Create the rosbag save directory.
+        If no bag name is provided, the current date/time is used as the default.
+        """
+
+        types = ('', None, False)
+
+        if bag_name in types:
+            bag_name = str(datetime.date.today()) + '-' + str(datetime.datetime.now().time())[0:8]
+
+        directory = os.path.join(self.dir, 'online_bagger/')
+
+        rospy.loginfo('bag directory: %s', directory)
+        rospy.loginfo('bag name: %s', bag_name)
+
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+
+        self.bag = rosbag.Bag(os.path.join(directory, bag_name + '.bag'), 'w')
+
+    def set_bagger_status(self):
+        """
+        Set the status of the online bagger.
+        """
+
+        self.bagger_status = 'Subscriber List: ' + str(self.subscriber_list) + ' Message Count: ' \
+            + str(self.get_total_message_count())
+
+    def start_bagging(self, req):
+        """
+        Dump the requested data from the deques to a bag. Streaming is temporarily
+        stopped during the bagging process and resumed when it is over.
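+
+        Example request (illustrative; fields from BaggerCommands.srv):
+            bag_name: 'test'  -> written to <bag_package_path>/online_bagger/test.bag
+            bag_time: 10.0    -> only the last 10 seconds of each topic are written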
+ """ + + self.streaming = False + self.set_bag_directory(req.bag_name) + self.set_bagger_status() + + requested_seconds = req.bag_time + + for topic in self.subscriber_list: + if topic[2] == False: + continue + + topic = topic[0] + rospy.loginfo('topic: %s', topic) + + # if no number is provided or a zero is given, bag all messages + types = (0, 0.0, None) + if req.bag_time in types: + bag_index = 0 + self.bag_report = 'All messages were bagged' + + # get time index the normal way + else: + bag_index = self.get_time_index(topic, requested_seconds) + + messages = 0 # message number in a given topic + for msgs in self.topic_messages[topic][bag_index:]: + messages = messages + 1 + self.bag.write(topic, msgs[1], t=msgs[0]) + if messages % 100 == 0: # print every 100th topic, type and time + rospy.loginfo('topic: %s, message type: %s, time: %s', + topic, type(msgs[1]), type(msgs[0])) + + # empty deque when done writing to bag + self.topic_messages[topic].clear() + + rospy.loginfo('Bag Report: %s', self.bagger_status) + self.bag.close() + rospy.loginfo('bagging finished!') + + self.streaming = True + return self.bagger_status + +if __name__ == "__main__": + rospy.init_node('online_bagger') + stream = OnlineBagger() + rospy.spin() diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/package.xml b/utils/mil_tools/mil_ros_tools/online_bagger/package.xml new file mode 100644 index 00000000..93456a41 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/online_bagger/package.xml @@ -0,0 +1,13 @@ + + + mil_ros_tools + 1.0.0 + The mil_ros_tools package + Israelle Widjaja + MIT + + catkin + rospy + rospy + + diff --git a/utils/mil_tools/mil_ros_tools/online_bagger/srv/BaggerCommands.srv b/utils/mil_tools/mil_ros_tools/online_bagger/srv/BaggerCommands.srv new file mode 100644 index 00000000..cc52d84e --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/online_bagger/srv/BaggerCommands.srv @@ -0,0 +1,4 @@ +string bag_name +float32 bag_time +--- +string status diff --git a/utils/mil_tools/mil_ros_tools/rviz_helpers.py b/utils/mil_tools/mil_ros_tools/rviz_helpers.py new file mode 100644 index 00000000..61bb0381 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/rviz_helpers.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +from __future__ import division + +import numpy as np +import rospy +import visualization_msgs.msg as visualization_msgs + +from geometry_msgs.msg import Pose, Vector3, Point +from std_msgs.msg import ColorRGBA + +import mil_ros_tools + +rviz_pub = rospy.Publisher("visualization", visualization_msgs.Marker, queue_size=3) + +def draw_sphere(position, color, scaling=(0.11, 0.11, 0.11), m_id=4, frame='/base_link'): + pose = Pose( + position=mil_ros_tools.numpy_to_point(position), + orientation=mil_ros_tools.numpy_to_quaternion([0.0, 0.0, 0.0, 1.0]) + ) + + marker = visualization_msgs.Marker( + ns='wamv', + id=m_id, + header=mil_ros_tools.make_header(frame=frame), + type=visualization_msgs.Marker.SPHERE, + action=visualization_msgs.Marker.ADD, + pose=pose, + color=ColorRGBA(*color), + scale=Vector3(*scaling), + lifetime=rospy.Duration(), + ) + rviz_pub.publish(marker) + +def draw_ray_3d(pix_coords, camera_model, color, frame='/stereo_front', m_id=0, length=35): + marker = make_ray( + base=np.array([0.0, 0.0, 0.0]), + direction=np.array(camera_model.projectPixelTo3dRay(pix_coords)), + length=length, + color=color, + frame=frame, + m_id=m_id + ) + + rviz_pub.publish(marker) + +def make_ray(base, direction, length, color, frame='/base_link', m_id=0, **kwargs): + '''Handle the frustration that Rviz 
cylinders are designated by their center, not base''' + marker = visualization_msgs.Marker( + ns='wamv', + id=m_id, + header=mil_ros_tools.make_header(frame=frame), + type=visualization_msgs.Marker.LINE_STRIP, + action=visualization_msgs.Marker.ADD, + color=ColorRGBA(*color), + scale=Vector3(0.05, 0.05, 0.05), + points=map(lambda o: Point(*o), [base, direction * length]), + lifetime=rospy.Duration(), + **kwargs + ) + return marker diff --git a/utils/mil_tools/mil_ros_tools/threading_helpers.py b/utils/mil_tools/mil_ros_tools/threading_helpers.py new file mode 100644 index 00000000..255298d4 --- /dev/null +++ b/utils/mil_tools/mil_ros_tools/threading_helpers.py @@ -0,0 +1,30 @@ +def thread_lock(lock): + '''Use an existing thread lock to thread-lock a function + This prevents the function from being executed by multiple threads at once + + Example: + import threading + lock = threading.Lock() + + @thread_lock(lock) + def my_function(a, b, c): + print a, b, c + ''' + + def lock_thread(function_to_lock): + '''thread_lock(function) -> locked function + Thread locking decorator + If you use this as a decorator for a function, it will apply a threading lock during the execution of that function, + Which guarantees that no ROS callbacks can change the state of data while it is executing. This + is critical to make sure that a new message being sent doesn't cause a weird serial interruption + ''' + def locked_function(*args, **kwargs): + # Get threading lock + with lock: + result = function_to_lock(*args, **kwargs) + # Return, pretending the function hasn't changed at all + return result + # Return the function with locking added + return locked_function + + return lock_thread diff --git a/utils/mil_tools/mil_tools/__init__.py b/utils/mil_tools/mil_tools/__init__.py new file mode 100644 index 00000000..ebd6cc7f --- /dev/null +++ b/utils/mil_tools/mil_tools/__init__.py @@ -0,0 +1,2 @@ +from mil_ros_tools import * +from mil_misc_tools import * diff --git a/utils/mil_tools/nodes/clicked_point_recorder.py b/utils/mil_tools/nodes/clicked_point_recorder.py new file mode 100755 index 00000000..51814ece --- /dev/null +++ b/utils/mil_tools/nodes/clicked_point_recorder.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python + +""" +Listens to RVIZ clicked points, storing points in a csv file + +Usage: rosrun mil_tools clicked_point_recorder.py +""" + +import rospy +import datetime +import csv +from geometry_msgs.msg import PointStamped +class ClickedPointRecorder: + def __init__(self): + self.points = [] + self.point_sub = rospy.Subscriber("/clicked_point", PointStamped, self.point_cb) + + def point_to_dict(self, point): + return { 'seq': point.header.seq, + 'secs' : point.header.stamp.secs, + 'nsecs' : point.header.stamp.nsecs, + 'frame_id' : point.header.frame_id, + 'x' : point.point.x, + 'y': point.point.y, + 'z': point.point.z} + + def write_file(self): + time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + filename = 'clickedpoints{}.csv'.format(time) + with open(filename ,'wx') as csvfile: + fieldnames = ['seq','secs','nsecs','frame_id','x','y','z'] + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + for p in self.points: + d = self.point_to_dict(p) + writer.writerow(d) + rospy.loginfo("Writing points to {}".format(filename)) + + def point_cb(self,point): + rospy.loginfo("Received new point: {}".format(point)) + self.points.append(point) + +if __name__ == '__main__': + rospy.init_node('clicked_point_recorder') + recorder = ClickedPointRecorder() + def shutdown_cb(): + 
        recorder.write_file()
+    rospy.on_shutdown(shutdown_cb)
+    rospy.spin()
diff --git a/utils/mil_tools/nodes/ogrid_draw.py b/utils/mil_tools/nodes/ogrid_draw.py
new file mode 100755
index 00000000..bd664701
--- /dev/null
+++ b/utils/mil_tools/nodes/ogrid_draw.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+"""
+Opens a neat window for drawing occupancy grids!
+
+"""
+import rospy
+import cv2
+import numpy as np
+import argparse
+import sys
+
+from geometry_msgs.msg import Pose
+from nav_msgs.msg import OccupancyGrid, MapMetaData
+
+rospy.init_node("ogrid_draw_node", anonymous=True)
+
+
+class DrawGrid(object):
+    def __init__(self, height, width, image_path):
+        self.height, self.width = height, width
+
+        self.clear_screen()
+
+        if image_path:
+            self.make_image(image_path)
+
+        cv2.namedWindow('Draw OccupancyGrid')
+        cv2.setMouseCallback('Draw OccupancyGrid', self.do_draw)
+        self.drawing = 0
+
+    def make_image(self, image_path):
+        img = cv2.imread(image_path, 0)
+        if img is None:
+            print "Image not found at '{}'".format(image_path)
+            return
+
+        img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
+        _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
+        self.img = np.clip(img, -1, 100)
+
+    def clear_screen(self):
+        self.img = np.zeros((self.height, self.width), np.uint8)
+
+    def do_draw(self, event, x, y, flags, param):
+        draw_vals = {1: 100, 2: 0}
+
+        if event == cv2.EVENT_LBUTTONUP or event == cv2.EVENT_RBUTTONUP:
+            self.drawing = 0
+        elif event == cv2.EVENT_LBUTTONDOWN:
+            self.drawing = 1
+        elif event == cv2.EVENT_RBUTTONDOWN:
+            self.drawing = 2
+        elif self.drawing != 0:
+            cv2.circle(self.img, (x, y), 5, draw_vals[self.drawing], -1)
+
+
+class OGridPub(object):
+    def __init__(self, image_path=None):
+        height = int(rospy.get_param("~grid_height", 800))
+        width = int(rospy.get_param("~grid_width", 800))
+        resolution = rospy.get_param("~grid_resolution", .3)
+        ogrid_topic = rospy.get_param("~grid_topic", "/ogrid")
+
+        self.grid_drawer = DrawGrid(height, width, image_path)
+        self.ogrid_pub = rospy.Publisher(ogrid_topic, OccupancyGrid, queue_size=1)
+
+        m = MapMetaData()
+        m.resolution = resolution
+        m.width = width
+        m.height = height
+        pos = np.array([-width * resolution / 2, -height * resolution / 2, 0])
+        quat = np.array([0, 0, 0, 1])
+        m.origin = Pose()
+        m.origin.position.x, m.origin.position.y = pos[:2]
+
+        self.map_meta_data = m
+
+        rospy.Timer(rospy.Duration(1), self.pub_grid)
+
+    def pub_grid(self, *args):
+        grid = self.grid_drawer.img
+
+        ogrid = OccupancyGrid()
+        ogrid.header.frame_id = '/enu'
+        ogrid.header.stamp = rospy.Time.now()
+        ogrid.info = self.map_meta_data
+        ogrid.data = np.subtract(np.flipud(grid).flatten(), 1).astype(np.int8).tolist()
+
+        self.ogrid_pub.publish(ogrid)
+
+
+if __name__ == "__main__":
+    usage_msg = "Lets you draw an occupancy grid!"
+    desc_msg = "If you pass -i <image_path>, the ogrid will be created from a thresholded black-and-white version of that image."
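+
+    # Note on the grid data above (illustrative): pub_grid flips the drawn image
+    # vertically and shifts every cell down by one, so untouched pixels (0)
+    # publish as -1 (unknown) and drawn walls (100) publish as 99:
+    #
+    #     np.subtract(np.flipud(grid).flatten(), 1).astype(np.int8)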
+
+    parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
+    parser.add_argument('-i', '--image', action='store', type=str,
+                        help="Path to an image to use as an ogrid.")
+
+    # Roslaunch passes in some args we don't want to deal with (there may be a better way than this)
+    if '-i' in sys.argv:
+        args = parser.parse_args(sys.argv[1:])
+        im_path = args.image
+    else:
+        im_path = None
+
+    o = OGridPub(image_path=im_path)
+
+    while not rospy.is_shutdown():
+        cv2.imshow("Draw OccupancyGrid", o.grid_drawer.img)
+        k = cv2.waitKey(100) & 0xFF
+
+        if k == 27:  # Esc quits
+            break
+        elif k == 113:  # 'q' clears the screen
+            o.grid_drawer.clear_screen()
+
+    cv2.destroyAllWindows()
diff --git a/utils/mil_tools/nodes/rviz_waypoint.py b/utils/mil_tools/nodes/rviz_waypoint.py
new file mode 100755
index 00000000..e70817d7
--- /dev/null
+++ b/utils/mil_tools/nodes/rviz_waypoint.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+from __future__ import division
+
+import txros
+import numpy as np
+from twisted.internet import defer
+from geometry_msgs.msg import PoseStamped, PointStamped
+
+
+class RvizRepublisher(object):
+    @txros.util.cancellableInlineCallbacks
+    def init(self):
+        self.nh = yield txros.NodeHandle.from_argv("rviz_republisher")
+        self.point_republish = self.nh.advertise("/rviz_point", PointStamped)
+        self.pose_republish = self.nh.advertise("/rviz_goal", PoseStamped)
+
+        self.rviz_goal = self.nh.subscribe("/move_base_simple/goal", PoseStamped)
+        self.clicked_point = self.nh.subscribe("/clicked_point", PointStamped)
+
+        self.delay = .1  # seconds
+        self.publish_point()
+        self.publish_pose()
+
+    @txros.util.cancellableInlineCallbacks
+    def publish_point(self):
+        yield self.clicked_point.get_next_message()
+
+        while True:
+            yield self.nh.sleep(self.delay)
+            last_point = yield self.clicked_point.get_last_message()
+            self.point_republish.publish(last_point)
+
+    @txros.util.cancellableInlineCallbacks
+    def publish_pose(self):
+        yield self.rviz_goal.get_next_message()
+
+        while True:
+            yield self.nh.sleep(self.delay)
+            last_pose = yield self.rviz_goal.get_last_message()
+            self.pose_republish.publish(last_pose)
+
+
+@txros.util.cancellableInlineCallbacks
+def main():
+    rr = RvizRepublisher()
+    yield rr.init()
+
+    yield defer.Deferred()
+
+
+txros.util.launch_main(main)
diff --git a/utils/mil_tools/nodes/tf_fudger.py b/utils/mil_tools/nodes/tf_fudger.py
new file mode 100755
index 00000000..ab7943b6
--- /dev/null
+++ b/utils/mil_tools/nodes/tf_fudger.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+from __future__ import division
+
+import rospy
+import tf
+import tf.transformations as trns
+import cv2
+import argparse
+import sys
+import numpy as np
+
+rospy.init_node("tf_fudger", anonymous=True)
+br = tf.TransformBroadcaster()
+
+usage_msg = "Useful to test transforms between two frames."
+desc_msg = "Pass the name of the parent frame and the child name, as well as an optional multiplier. \n\
+At any point you can press q to switch between displaying a quaternion and an euler angle. \n\
+Press 's' to exit and REPLACE or ADD the line in the tf launch file. Use control-c if you don't want to save."
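+
+# Illustrative resolution math for the sliders configured below: with the
+# default --range of 3 m spread over 1000 ticks, one tick moves the child
+# frame 3/1000 = 0.003 m; the angle sliders cover 2*pi rad over 3000 ticks,
+# i.e. roughly 0.0021 rad per tick.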
+ +parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) +parser.add_argument(dest='tf_parent', + help="Parent tf frame.") +parser.add_argument(dest='tf_child', + help="Child tf frame.") +parser.add_argument('--range', action='store', type=float, default=3, + help="Maximum range allowed for linear displacement (in meters)") + +args = parser.parse_args(sys.argv[1:]) + +cv2.namedWindow('tf', cv2.WINDOW_NORMAL) + +# slider max values +x_max = 1000 +ang_max = 3000 + +# tf ranges (symmetric about zero) +x_range = args.range # meters +ang_range = 2 * np.pi # radians + +# tf fudging resolution +x_res = x_range / x_max +ang_res = ang_range / ang_max + +print "Distance resolution: {} meters\nAngular resolution: {} radians".format(x_res, ang_res) + +cv2.createTrackbar("x", 'tf', 0, x_max, lambda x: x) +cv2.createTrackbar("y", 'tf', 0, x_max, lambda x: x) +cv2.createTrackbar("z", 'tf', 0, x_max, lambda x: x) + +cv2.createTrackbar("roll", 'tf', 0, ang_max, lambda x: x) +cv2.createTrackbar("pitch", 'tf', 0, ang_max, lambda x: x) +cv2.createTrackbar("yaw", 'tf', 0, ang_max, lambda x: x) + +toTfLin = lambda x: x * x_res - 0.5 * x_range +toTfAng = lambda x: x * ang_res - 0.5 * ang_range + +toCvLin = lambda x: (x+0.5*x_range)/x_res +toCvAng = lambda x: (x+0.5*ang_range)/ang_res + +p = q = None +q_mode = False + +# Used for printing and saving +tf_line = '\n' +prd = 100 # This will get replaced if it needs to + +args.tf_child = args.tf_child[1:] if args.tf_child[0] == '/' else args.tf_child +args.tf_parent = args.tf_parent[1:] if args.tf_parent[0] == '/' else args.tf_parent + +listener = tf.TransformListener() + +p_original = (0,0,0) +rpy_original = (0,0,0) +try: + listener.waitForTransform(args.tf_parent,args.tf_child, rospy.Time(0), rospy.Duration(1)) + (trans,rot) = listener.lookupTransform(args.tf_parent,args.tf_child, rospy.Time(0)) + euler = trns.euler_from_quaternion(rot) + p_original = trans + rpy_original = euler +except tf.Exception: + print "TF not found, setting everything to zero" + +def set_bars(p, rpy): + cv2.setTrackbarPos("x", "tf",int( toCvLin(p[0]) ) ) + cv2.setTrackbarPos("y", "tf",int( toCvLin(p[1]) ) ) + cv2.setTrackbarPos("z", "tf",int( toCvLin(p[2]) ) ) + cv2.setTrackbarPos("roll", "tf",int( toCvAng(rpy[0]) )) + cv2.setTrackbarPos("pitch", "tf",int( toCvAng(rpy[1]) )) + cv2.setTrackbarPos("yaw", "tf",int( toCvAng(rpy[2]) )) + k = cv2.waitKey(100) & 0xFF + if k == ord('q'): + q_mode = not q_mode + +def reset(): + set_bars(p_original,rpy_original) + +reset() + +p_last = p_original +rpy_last = rpy_original + +while not rospy.is_shutdown(): + x, y, z = toTfLin(cv2.getTrackbarPos("x", "tf")), toTfLin(cv2.getTrackbarPos("y", "tf")), toTfLin(cv2.getTrackbarPos("z", "tf")) + p = (x, y, z) + rpy = (toTfAng(cv2.getTrackbarPos("roll", "tf")), toTfAng(cv2.getTrackbarPos("pitch", "tf")), toTfAng(cv2.getTrackbarPos("yaw", "tf"))) + q = tf.transformations.quaternion_from_euler(*rpy) + + if (not p == p_last) or (not rpy == rpy_last): + rpy_feedback = "xyz: {}, euler: {}".format([round(x, 5) for x in p], [round(np.degrees(x), 5) for x in rpy]) + q_feedback = "xyz: {}, q: {}".format([round(x, 5) for x in p], [round(x, 5) for x in q]) + print q_feedback if q_mode else rpy_feedback + p_last = p + rpy_last = rpy + + k = cv2.waitKey(100) & 0xFF + if k == ord('q'): + q_mode = not q_mode + + if k == ord('r'): + reset() + + if k == ord('z'): + set_bars((0, 0, 0), (0, 0, 0)) + + # This functionality was configured to replace lines in navigator's tf launch file, should refactor to be general 
later
+    '''
+    if k == ord('s'):
+        # Save the transform in navigator_launch/launch/tf.launch replacing the line
+        import rospkg
+        rospack = rospkg.RosPack()
+        launch_path = rospack.get_path('navigator_launch')
+        launch_path += "/launch/subsystems/tf.launch"
+
+        with open(launch_path, 'r') as f:
+            lines = f.readlines()
+
+        last_static_pub = 0
+        tab_level = ''
+        np.set_printoptions(suppress=True)
+        tf_line_to_add = tf_line.format(child=args.tf_child, p=p, q=np.round(q, 5), parent=args.tf_parent, prd="{prd}")
+        for i, line in enumerate(lines):
+            if args.tf_child in line and args.tf_parent in line:
+                tab_level = line[:line.find("<")]  # The tabs in front of the tf line
+                prd = int([x for x in line.split('"')[-2].split(" ") if x != ''][-1])  # Get the period from the tf line
+                lines[i] = tab_level + tf_line_to_add.format(prd=prd)
+
+                print "Found line in tf.launch!"
+                print "Replacing: {}".format(line.replace(tab_level, '')[:-1])
+                print "     With: {}".format(lines[i].replace(tab_level, '')[:-1])
+                break
+
+            elif 'pkg="tf"' in line:
+                # In case we don't find the tf line of interest to change, we want to insert the new tf line after the last tf line
+                tab_level = line[:line.find("<")]  # The tabs in front of the tf line
+                last_static_pub = i
+
+        else:
+            print "Tf link not found between /{parent} /{child} in tf.launch.".format(parent=args.tf_parent, child=args.tf_child)
+            lines.insert(last_static_pub + 1, '\n' + tab_level + tf_line_to_add.format(prd=prd))
+            print "Adding: {}".format(lines[last_static_pub + 1].replace(tab_level, '')[:-1])
+
+        with open(launch_path, 'w') as f:
+            for line in lines:
+                f.write(line)
+
+        break
+    '''
+
+    br.sendTransform(p, q, rospy.Time.now(), args.tf_child, args.tf_parent)
+
+# Print out the tf static transform line with the fudged tf
+print '\n', tf_line.format(child=args.tf_child, p=p, q=np.round(q, 5), parent=args.tf_parent, prd=prd)
diff --git a/utils/mil_tools/nodes/tf_to_gazebo.py b/utils/mil_tools/nodes/tf_to_gazebo.py
new file mode 100755
index 00000000..20f19d93
--- /dev/null
+++ b/utils/mil_tools/nodes/tf_to_gazebo.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+import roslib
+import rospy
+import math
+import tf
+import geometry_msgs.msg
+import numpy as np
+
+
+def main():
+    rospy.init_node('tf_to_gazebo')
+    do_cam_fix = rospy.get_param("~cam_fix", False)
+    tf_parent = rospy.get_param("~tf_parent", "/measurement")
+    listener = tf.TransformListener()
+    rate = rospy.Rate(10.0)
+    cam_fix_quat = (-0.5, 0.5, -0.5, -0.5)
+
+    while not rospy.is_shutdown():
+        try:
+            print "============= TRANSFORMS ================"
+            for frame_id in listener.getFrameStrings():
+                (trans, rot) = listener.lookupTransform(tf_parent, frame_id, rospy.Time(0))
+                print "--"
+                print "Transform {} {}".format(tf_parent, frame_id)
+                if do_cam_fix:
+                    rot = tf.transformations.quaternion_multiply(rot, cam_fix_quat)
+                print "(qx={}, qy={}, qz={}, qw={})".format(rot[0], rot[1], rot[2], rot[3])
+                print "(x={}, y={}, z={})".format(trans[0], trans[1], trans[2])
+                euler = tf.transformations.euler_from_quaternion(rot)
+                print "(Roll={}, Pitch={}, Yaw={})".format(euler[0], euler[1], euler[2])
+                print " {} {} {} {} {} {} ".format(trans[0], trans[1], trans[2], euler[0], euler[1], euler[2])
+        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
+            continue
+        rate.sleep()
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        print
diff --git a/utils/mil_tools/nodes/video_player b/utils/mil_tools/nodes/video_player
new file mode 100755
index 00000000..1b4e14bc
--- /dev/null
+++
b/utils/mil_tools/nodes/video_player @@ -0,0 +1,134 @@ +#!/usr/bin/env python +""" +Usage: rosrun mil_dev_tools video_player _filename:=MyVideoFile.mp4 + rosrun mil_dev_tools video_player MyVideoFile.mp4 + +Utility to play video files into a ros topic +with some added conveniences like pausing and +scrolling through the video with a slider bar. + +Plays any videofile that OpenCV can open (mp4,etc) +Publishes to video_player/filename/image_raw + +Set the following rosparams for customization +~filename string what file to load +~slider boolean True to open window with slider bar and pause +~start_frames int number of frames into video to start playback at + +If slider is set to True (default), a window will open with a +slider bar for moving through the video. Press space in this window +to pause the video and update the slider position. While paused, +you can press s to go frame by frame. + +""" + +import sys +import os +import rospy +import cv2 +from sensor_msgs.msg import Image +from cv_bridge import CvBridge, CvBridgeError + +class RosVideoPlayer: + + def __init__(self): + self.bridge = CvBridge() + self.filename = rospy.get_param('~filename', sys.argv[1]) + self.image_pub = rospy.Publisher("video_player/" + os.path.splitext(os.path.basename(self.filename))[0] + "/image_raw", + Image,queue_size=10) + self.slider = rospy.get_param('~slider',True) + self.start_frame = rospy.get_param('~start_frames',0) + self.cap = cv2.VideoCapture(self.filename) + + self.width = self.cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH) + self.height = self.cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT) + + self.roi_y_offset = rospy.get_param('~y_offset', 0) + self.roi_x_offset = rospy.get_param('~x_offset', 0) + self.roi_height = rospy.get_param('~height', self.height) + self.roi_width = rospy.get_param('~width', self.width) + + self.num_frames = self.cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT) + self.fps = rospy.get_param('~fps', self.cap.get(cv2.cv.CV_CAP_PROP_FPS)) + self.cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, self.start_frame) + self.num_frames = self.cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT) + if (self.num_frames < 1): + raise Exception("Cannot read video {}".format(self.filename)) + + self.paused = False + self.ended = False + self.last_key = 255 + + if (self.slider): + cv2.namedWindow('Video Control') + cv2.createTrackbar('Frame','Video Control', int(self.start_frame), int(self.num_frames), self.trackbar_cb) + cv2.createTrackbar('ROI x_offset', 'Video Control', int(self.roi_x_offset), int(self.width)-1, self.roi_x_cb) + cv2.createTrackbar('ROI y_offset', 'Video Control', int(self.roi_y_offset), int(self.height)-1, self.roi_y_cb) + cv2.createTrackbar('ROI height', 'Video Control', int(self.roi_height), int(self.height), self.roi_height_cb) + cv2.createTrackbar('ROI width', 'Video Control', int(self.roi_width), int(self.width), self.roi_width_cb) + rospy.loginfo("Playing {} at {}fps starting at frame {} ({} Total Frames)".format(self.filename,self.fps,self.start_frame,self.num_frames)) + + def run(self): + r = rospy.Rate(self.fps) # 10hz + while not rospy.is_shutdown() and self.cap.isOpened() and not self.ended: + if self.slider: + k = cv2.waitKey(1) & 0xFF + if not k == self.last_key: + self.last_key = k + if k == 27: + return + elif k == 32: + self.pause() + elif k == 115 and self.paused: + self.one_frame() + if self.paused: + cv2.setTrackbarPos('Frame','Video Control', int(self.cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES))) + else: + self.one_frame() + r.sleep() + + def one_frame(self): + ret, frame = (None, None) + try: + 
ret, frame = self.cap.read() + except Exception as e: + print "Exception: {}".format(e) + if not ret: + if not self.ended: + rospy.loginfo("File {} ended".format(self.filename)) + self.ended = True + return + else: + self.ended = False + x1 = self.roi_x_offset + x2 = x1 + self.roi_width + y1 = self.roi_y_offset + y2 = y1 + self.roi_height + frame_roied = frame[y1:min(y2, frame.shape[0]), x1:min(x2, frame.shape[1])] + self.image_pub.publish(self.bridge.cv2_to_imgmsg(frame_roied, "bgr8")) + + def trackbar_cb(self,x): + self.cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,x) + + def roi_x_cb(self, d): + self.roi_x_offset = d + + def roi_y_cb(self, d): + self.roi_y_offset = d + + def roi_height_cb(self, d): + self.roi_height = d if d > 0 else 1 + + def roi_width_cb(self, d): + self.roi_width = d if d > 0 else 1 + + def pause(self): + self.paused = not self.paused + +def main(): + rospy.init_node('video_player', anonymous=True) + player = RosVideoPlayer() + player.run() + +if __name__ == '__main__': + main() diff --git a/utils/mil_tools/package.xml b/utils/mil_tools/package.xml new file mode 100644 index 00000000..03116810 --- /dev/null +++ b/utils/mil_tools/package.xml @@ -0,0 +1,21 @@ + + + mil_tools + 1.0.0 + Development tools used in MIL projects + Kevin Allen + David Soto + Matthew Langford + MIT + catkin + rostest + roscpp + roscpp + rospy + cv_bridge + cv_bridge + cmake_modules + + + + diff --git a/utils/mil_tools/readme.md b/utils/mil_tools/readme.md new file mode 100644 index 00000000..77fa0c13 --- /dev/null +++ b/utils/mil_tools/readme.md @@ -0,0 +1,10 @@ +sub8_ros_tools +============== + +Miscellaneous ROS tools that are useful for various tasks. + +Examples: + +# TODO +* The items that directly interact with ros topics or the parameter server do not yet have unittests, they require rostest. + diff --git a/utils/mil_tools/scripts/param_sever b/utils/mil_tools/scripts/param_sever new file mode 100755 index 00000000..4cddcace --- /dev/null +++ b/utils/mil_tools/scripts/param_sever @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +import os +import random +import yaml + +import rospy + + +rospy.init_node('param_saver', anonymous=True) + +class MyDumper(yaml.Dumper): + def represent_mapping(self, tag, mapping, flow_style=False): + return yaml.Dumper.represent_mapping(self, tag, mapping, flow_style) + +while not rospy.is_shutdown(): + rospy.sleep(rospy.Duration(3)) + + entries = rospy.get_param('~') + for entry in entries.itervalues(): + filename = entry['filename'] + file_basepath = entry['file_basepath'] + param_paths = entry['param_paths'] + + p = rospy.get_param(file_basepath) + data = yaml.dump(dict((k, v) for k, v in p.iteritems() if k in param_paths), Dumper=MyDumper) + + if os.path.exists(filename): + with open(filename, 'rb') as f: + if f.read() == data: + continue + + tmp_filename = '%s.%i' % (filename, random.randrange(10000)) + with open(tmp_filename, 'wb') as f: + f.write(data) + os.rename(tmp_filename, filename) + + print 'Saved %s to %s' % (file_basepath, filename) diff --git a/utils/mil_tools/setup.py b/utils/mil_tools/setup.py new file mode 100644 index 00000000..0c9533d6 --- /dev/null +++ b/utils/mil_tools/setup.py @@ -0,0 +1,11 @@ +## ! 
DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
+
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
+
+# fetch values from package.xml
+setup_args = generate_distutils_setup(
+    packages=['mil_ros_tools', 'mil_misc_tools', 'mil_tools'],
+)
+
+setup(**setup_args)
diff --git a/utils/mil_tools/src/mil_tools/mil_tools.cpp b/utils/mil_tools/src/mil_tools/mil_tools.cpp
new file mode 100644
index 00000000..27bb316a
--- /dev/null
+++ b/utils/mil_tools/src/mil_tools/mil_tools.cpp
@@ -0,0 +1,56 @@
+// NOTE: the include targets and template arguments below were reconstructed;
+// they cover the ROS, std and OpenCV symbols actually used in this file.
+#include <mil_tools/mil_tools.hpp>
+#include <ros/ros.h>
+#include <opencv2/opencv.hpp>
+
+namespace mil_tools {
+
+using namespace std;
+
+vector<string> getRectifiedImageTopics(bool color)
+{
+  // get all currently published topics from master
+  ros::master::V_TopicInfo master_topics;
+  ros::master::getTopics(master_topics);
+
+  // Define lambda to determine if a topic is a rectified img topic
+  string target_pattern;
+  target_pattern = color ? "image_rect_color" : "image_rect";
+  auto isRectImgTopic = [&target_pattern](ros::master::TopicInfo topic_info)
+  {
+    // Find last slash
+    size_t final_slash = topic_info.name.rfind('/');
+
+    // Match end of topic name to image_rect pattern
+    if (final_slash == string::npos)
+      return false;
+    else
+    {
+      string topic_name_end = topic_info.name.substr(final_slash + 1);
+      return (topic_name_end.find(target_pattern) != string::npos) ? true : false;
+    }
+  };  // end lambda isRectImgTopic
+
+  // return list of rectified image topics
+  vector<string> image_rect_topics;
+  for (ros::master::TopicInfo topic : master_topics)
+  {
+    if (isRectImgTopic(topic))
+      image_rect_topics.push_back(topic.name);
+  }
+  return image_rect_topics;
+}
+
+
+void sortContours(vector<vector<cv::Point> >& contour_vec, bool ascending)
+{
+  auto comp_length = ascending ?
+      [](vector<cv::Point> const &contour1, vector<cv::Point> const &contour2)
+      { return fabs(arcLength(contour1, true)) < fabs(arcLength(contour2, true)); } :
+      [](vector<cv::Point> const &contour1, vector<cv::Point> const &contour2)
+      { return fabs(arcLength(contour1, true)) > fabs(arcLength(contour2, true)); };
+
+  sort(contour_vec.begin(), contour_vec.end(), comp_length);
+}
+
+}  // namespace mil_tools
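+
+// Example use (illustrative):
+//
+//     std::vector<std::string> topics = mil_tools::getRectifiedImageTopics(true);
+//     for (const std::string& t : topics)
+//       ROS_INFO("rectified color topic: %s", t.c_str());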
diff --git a/utils/mil_tools/test/math_helpers_test.py b/utils/mil_tools/test/math_helpers_test.py
new file mode 100644
index 00000000..cbf79480
--- /dev/null
+++ b/utils/mil_tools/test/math_helpers_test.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+import unittest
+import numpy as np
+from geometry_msgs.msg import Quaternion
+from mil_ros_tools import quat_to_euler, euler_to_quat, normalize
+from mil_ros_tools import compose_transformation
+
+
+class TestROSTools(unittest.TestCase):
+
+    def test_normalize_vector(self):
+        '''Test vector normalization'''
+        for k in range(10):
+            rand_vec = np.random.random(k)  # Make a random k-length vector
+
+            # Ignore the astronomically unlikely case that a vector has near 0 norm
+            if not np.isclose(np.sum(rand_vec), 0):
+                normalized = normalize(rand_vec)
+                norm = np.linalg.norm(normalized)
+
+                # Test that the norm is 1
+                np.testing.assert_almost_equal(norm, 1.0, err_msg="The normalized vector did not have length 1")
+
+    def test_quat_to_euler(self):
+        ''' Test quaternion to euler angle conversion '''
+
+        q = Quaternion(x=0.70711, y=0.0, z=0.0, w=0.70711)
+        numpy_array = quat_to_euler(q)
+        truth = np.array(([1.57079633, 0.0, 0.0]))
+        np.testing.assert_almost_equal(numpy_array, truth, err_msg="Quaternion to euler conversion incorrect")
+
+    def test_compose_transformation(self):
+        ''' Test composing a transformation from a rotation R and a translation t '''
+        R = np.array(([3, 6, 1],
+                      [2, 5, 2],
+                      [1, 4, 3]))
+
+        t = np.array((1, 2, 3))
+
+        truth = np.array(([3, 6, 1, 0],
+                          [2, 5, 2, 0],
+                          [1, 4, 3, 0],
+                          [1, 2, 3, 1]))
+
+        test = compose_transformation(R, t)
+        np.testing.assert_almost_equal(test, truth, err_msg="Error composing transformation")
+
+    def test_euler_to_quat(self):
+        ''' Test euler angle to quaternion conversion '''
+
+        e = np.array(([1.57079633, 0.0, 0.0]))
+        testing = euler_to_quat(e)
+        # strip away ROS data to just test return values
+        # because unittest doesn't support ROS message operands
+        testing = np.array(([testing.x, testing.y, testing.z, testing.w]))
+        truth = np.array(([0.70710678, 0.0, 0.0, 0.70710678]))
+        np.testing.assert_almost_equal(testing, truth, err_msg="Incorrect euler to quaternion conversion")
+
+
+if __name__ == '__main__':
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestROSTools)
+    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/utils/mil_tools/test/test_ros_tools.py b/utils/mil_tools/test/test_ros_tools.py
new file mode 100644
index 00000000..6c4a518c
--- /dev/null
+++ b/utils/mil_tools/test/test_ros_tools.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+import unittest
+import numpy as np
+from geometry_msgs.msg import Quaternion, Vector3, Pose2D
+from sensor_msgs.msg import Image
+from mil_ros_tools import make_image_msg, get_image_msg
+from mil_ros_tools import rosmsg_to_numpy, make_wrench_stamped
+from mil_ros_tools import thread_lock
+from mil_ros_tools import skew_symmetric_cross, make_rotation, normalize
+
+
+class TestROSTools(unittest.TestCase):
+    def test_rosmsg_to_numpy_quaternion(self):
+        '''Test a rosmsg conversion for a geometry_msgs/Quaternion'''
+        # I know this is not a unit quaternion
+        q = Quaternion(x=0.7, y=0.7, z=0.1, w=0.2)
+        numpy_array = rosmsg_to_numpy(q)
+
+        np.testing.assert_allclose(
+            np.array([0.7, 0.7, 0.1, 0.2]),
+            numpy_array
+        )
+
+    def test_rosmsg_to_numpy_vector(self):
+        '''Test a rosmsg conversion for a geometry_msgs/Vector'''
+        v = Vector3(x=0.1, y=99., z=21.)
+ numpy_array = rosmsg_to_numpy(v) + + np.testing.assert_allclose( + np.array([0.1, 99., 21.]), + numpy_array + ) + + def test_rosmsg_to_numpy_custom(self): + '''Test a rosmsg conversion for a custom msg''' + pose_2d = Pose2D(x=1.0, y=2.0, theta=3.14) + numpy_array = rosmsg_to_numpy(pose_2d, ['x', 'y', 'theta']) + + np.testing.assert_allclose( + np.array([1.0, 2.0, 3.14]), + numpy_array + ) + + def test_make_wrench_stamped(self): + '''Test that wrenchmaking works''' + for k in range(10): + force = np.random.random(3) * 10 + torque = np.random.random(3) * 10 + wrench_msg = make_wrench_stamped(force, torque, frame='/enu') + + msg_force = rosmsg_to_numpy(wrench_msg.wrench.force) # noqa + msg_torque = rosmsg_to_numpy(wrench_msg.wrench.torque) # noqa + self.assertIsNotNone(msg_force) + self.assertIsNotNone(msg_torque) + + def test_make_image_msg(self): + '''Test that make ros image message doesn't fail''' + im = np.ones((255, 255, 3)).astype(np.uint8) + im_msg = make_image_msg(im) + self.assertIsInstance(im_msg, Image) + + def test_get_image_msg(self): + '''Test that a made ros image can be returned to its original form''' + im = np.ones((255, 255, 3)).astype(np.uint8) + im_msg = make_image_msg(im) + + cv_im = get_image_msg(im_msg) + np.testing.assert_array_equal(im, cv_im) + + def test_thread_lock(self): + '''Test that the thread lock decorator correctly locks, in the correct order''' + class FakeLock(object): + def __init__(self): + self.entry = False + self.exit = False + + def __enter__(self): + self.entry = True + + def __exit__(self, *args): + self.exit = True + + fake_lock = FakeLock() + + @thread_lock(fake_lock) + def lock_test(a): + return (fake_lock.entry is True) and (fake_lock.exit is False) + + result = lock_test(1) + + self.assertTrue(fake_lock.entry, msg='Thread was never locked') + self.assertTrue(fake_lock.exit, msg='Thread was never released') + self.assertTrue(result, msg='Thread was not locked while the function was executed') + + def test_skew_symmetric_cross(self): + '''Test that the skew symmetric cross product matrix produces the definition + [1] https://en.wikipedia.org/wiki/Cross_product#Skew-symmetric_matrix + ''' + skew_sym = skew_symmetric_cross([1, 2, 3]) + truth = np.array([ + [+0, -3, +2], + [+3, +0, -1], + [-2, +1, +0], + ]) + np.testing.assert_allclose(skew_sym, truth, err_msg="Did not make a Skew-symmetric matrix. 
Pretty big screw-up imho.") + + def test_make_rotation(self): + '''Test several random vector pairs, and see if we can generate a valid alignment''' + scaling = 10 + for k in range(10): + p = (np.random.random(3) - 0.5) * scaling + q = (np.random.random(3) - 0.5) * scaling + + R = make_rotation(p, q) + p_rotated = R.dot(p) + + # Test that the matrix actually aligns p with q + np.testing.assert_allclose( + [0.0, 0.0, 0.0], np.cross(p_rotated, q), atol=1e-5, + err_msg="The generated rotation matrix did not align the input vectors, {} to {}".format( + p, q + ) + ) + self.assertGreater(np.dot(p_rotated, q), 0.0, msg="The rotation did wacky inversion") + + def test_normalize_vector(self): + '''Test vector normalization''' + for k in range(10): + rand_vec = np.random.random(k) # Make a random k-length vector + + # Ignore the astronomically unlikely case that a vector has near 0 norm + if not np.isclose(np.sum(rand_vec), 0): + normalized = normalize(rand_vec) + norm = np.linalg.norm(normalized) + + # Test that the norm is 1 + np.testing.assert_almost_equal(norm, 1.0, err_msg="The normalized vector did not have length 1") + + +if __name__ == '__main__': + suite = unittest.TestLoader().loadTestsFromTestCase(TestROSTools) + unittest.TextTestRunner(verbosity=2).run(suite)
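+
+# (Illustrative) the suite can also be run directly:
+#     python utils/mil_tools/test/test_ros_tools.py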